"""Unconditional image generation pipeline for latent diffusion (VQ-VAE + UNet + DDIM scheduler)."""
import inspect
from typing import List, Optional, Tuple, Union

import torch

from ...models import UNet2DModel, VQModel
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput


class LDMPipeline(DiffusionPipeline):
    """Denoises random latents with a UNet/DDIM loop, then decodes them to images with a VQ-VAE."""

    def __init__(self, vqvae: VQModel, unet: UNet2DModel, scheduler: DDIMScheduler):
        super().__init__()
        self.register_modules(vqvae=vqvae, unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        batch_size: int = 1,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        eta: float = 0.0,
        num_inference_steps: int = 50,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        # sample gaussian noise in latent space
        latents = randn_tensor(
            (batch_size, self.unet.config.in_channels, self.unet.config.sample_size, self.unet.config.sample_size),
            generator=generator,
        )
        latents = latents.to(self.device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma

        self.scheduler.set_timesteps(num_inference_steps)

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())

        extra_kwargs = {}
        if accepts_eta:
            extra_kwargs["eta"] = eta

        for t in self.progress_bar(self.scheduler.timesteps):
            latent_model_input = self.scheduler.scale_model_input(latents, t)
            # predict the noise residual
            noise_prediction = self.unet(latent_model_input, t).sample
            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_prediction, t, latents, **extra_kwargs).prev_sample

        # decode the image latents with the VQ-VAE
        image = self.vqvae.decode(latents).sample
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
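
# Minimal usage sketch for the pipeline above, kept as a comment because
# importing diffusers from inside its own module would be circular. The
# checkpoint id is illustrative of an unconditional LDM checkpoint:
#
#     from diffusers import LDMPipeline
#
#     pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
#     image = pipe(batch_size=1, num_inference_steps=50).images[0]
#     image.save("ldm_sample.png")
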
"""Find the value of d < `digit` for which 1/d contains the longest recurring
cycle in its decimal fraction part, by tracking long-division remainders."""


def solution(numerator: int = 1, digit: int = 1000) -> int:
    """Return the denominator whose unit fraction has the longest recurring cycle.

    For each candidate denominator, the remainders of the long division are
    recorded; the count of distinct remainders seen before one repeats is used
    as the cycle length.
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
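    # Example run: among d < 10, 1/7 = 0.(142857) has the longest recurring
    # cycle (six digits), so this prints 7.
    print(f"solution(1, 10) = {solution(1, 10)}")
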
"""Testing suite for the PyTorch ConvNextV2 model."""
import inspect
import unittest

from transformers import ConvNextV2Config
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin


if is_torch_available():
    import torch

    from transformers import ConvNextV2Backbone, ConvNextV2ForImageClassification, ConvNextV2Model
    from transformers.models.convnextv2.modeling_convnextv2 import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import AutoImageProcessor


class ConvNextV2ModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=32,
        num_channels=3,
        num_stages=4,
        hidden_sizes=[10, 20, 30, 40],
        depths=[2, 2, 3, 2],
        is_training=True,
        use_labels=True,
        intermediate_size=37,
        hidden_act="gelu",
        num_labels=10,
        initializer_range=0.02,
        out_features=["stage2", "stage3", "stage4"],
        out_indices=[2, 3, 4],
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.num_stages = num_stages
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.is_training = is_training
        self.use_labels = use_labels
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.out_features = out_features
        self.out_indices = out_indices
        self.scope = scope

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        return ConvNextV2Config(
            num_channels=self.num_channels,
            hidden_sizes=self.hidden_sizes,
            depths=self.depths,
            num_stages=self.num_stages,
            hidden_act=self.hidden_act,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_features=self.out_features,
            out_indices=self.out_indices,
            num_labels=self.num_labels,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = ConvNextV2Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        # expected last hidden states: B, C, H // 32, W // 32
        self.parent.assertEqual(
            result.last_hidden_state.shape,
            (self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32),
        )

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        model = ConvNextV2ForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_backbone(self, config, pixel_values, labels):
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify hidden states
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[1], 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[1:])

        # verify backbone works with out_features=None
        config.out_features = None
        model = ConvNextV2Backbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.hidden_sizes[-1], 1, 1])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict

    def prepare_config_and_inputs_with_labels(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values, "labels": labels}
        return config, inputs_dict


@require_torch
class ConvNextV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    """ConvNextV2 does not use input_ids, inputs_embeds, attention_mask or seq_length,
    so several common tests are overridden or skipped here."""

    all_model_classes = (
        (
            ConvNextV2Model,
            ConvNextV2ForImageClassification,
            ConvNextV2Backbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": ConvNextV2Model, "image-classification": ConvNextV2ForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = ConvNextV2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvNextV2Config, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return

    @unittest.skip(reason="ConvNextV2 does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not support input and output embeddings")
    def test_model_common_attributes(self):
        pass

    @unittest.skip(reason="ConvNextV2 does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass

    def test_training(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.return_dict = True

            if model_class.__name__ in [
                *get_values(MODEL_MAPPING_NAMES),
                *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES),
            ]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        if not self.model_tester.is_training:
            return

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_with_labels()
            config.use_cache = False
            config.return_dict = True

            if (
                model_class.__name__
                in [*get_values(MODEL_MAPPING_NAMES), *get_values(MODEL_FOR_BACKBONE_MAPPING_NAMES)]
                or not model_class.supports_gradient_checkpointing
            ):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states

            expected_num_stages = self.model_tester.num_stages
            self.assertEqual(len(hidden_states), expected_num_stages + 1)

            # ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
            self.assertListEqual(
                list(hidden_states[0].shape[-2:]),
                [self.model_tester.image_size // 4, self.model_tester.image_size // 4],
            )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = ConvNextV2Model.from_pretrained(model_name)
            self.assertIsNotNone(model)


# We verify the integration results on a COCO fixture image
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class ConvNextV2ModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("facebook/convnextv2-tiny-1k-224") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = ConvNextV2ForImageClassification.from_pretrained("facebook/convnextv2-tiny-1k-224").to(torch_device)

        preprocessor = self.default_image_processor
        image = prepare_img()
        inputs = preprocessor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = torch.tensor([0.9996, 0.1966, -0.4386]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
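
if __name__ == "__main__":
    # Convenience entry point (an assumption, not part of transformers' test
    # layout): runs the fast tests above directly; @slow tests stay skipped
    # unless RUN_SLOW is set, per transformers' testing conventions.
    unittest.main()
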
"""Tests for the ChineseCLIP image processor."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a batch of test images as PIL images (default), numpy arrays (`numpify`)
        or PyTorch tensors (`torchify`)."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (RGBA inputs are converted to 3-channel RGB)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
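
if __name__ == "__main__":
    # Minimal usage sketch of the processor under test (an assumption, not part
    # of the original test module): a synthetic channels-first image run through
    # the default ChineseCLIP config, which resizes, crops and normalizes.
    processor = ChineseCLIPImageProcessor()
    dummy = np.random.randint(255, size=(3, 256, 256), dtype=np.uint8)
    print(processor(images=dummy, return_tensors="pt").pixel_values.shape)
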
"""Tests for the PoolFormer image processor."""
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import PoolFormerImageProcessor


class PoolFormerImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize_and_center_crop=True,
        size=None,
        crop_pct=0.9,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
    ):
        size = size if size is not None else {"shortest_edge": 30}
        crop_size = crop_size if crop_size is not None else {"height": 30, "width": 30}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize_and_center_crop = do_resize_and_center_crop
        self.size = size
        self.crop_pct = crop_pct
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std

    def prepare_image_processor_dict(self):
        return {
            "size": self.size,
            "do_resize_and_center_crop": self.do_resize_and_center_crop,
            "crop_pct": self.crop_pct,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
        }


@require_torch
@require_vision
class PoolFormerImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = PoolFormerImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = PoolFormerImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize_and_center_crop"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "crop_pct"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 30})
        self.assertEqual(image_processor.crop_size, {"height": 30, "width": 30})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
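
if __name__ == "__main__":
    # Minimal usage sketch of the processor under test (an assumption, not part
    # of the original test module): a synthetic channels-first image run through
    # the default config's "resize with crop_pct, then center crop" path.
    processor = PoolFormerImageProcessor()
    dummy = np.random.randint(255, size=(3, 300, 400), dtype=np.uint8)
    print(processor(images=dummy, return_tensors="pt").pixel_values.shape)
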
"""Benchmark helpers: time a function and build dummy datasets with ArrowWriter."""
import timeit

import numpy as np

import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD


def get_duration(func):
    """Decorator that makes `func` return its wall-clock duration in seconds."""

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper


def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    """Generate `num_examples` dummy examples matching the given feature types."""
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    """Write a dummy dataset to `dataset_path` and load it back as a `datasets.Dataset`."""
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
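
if __name__ == "__main__":
    # Minimal usage sketch (an assumption, not part of the original benchmark
    # script; the output path is illustrative): time how long writing a small
    # dummy dataset with two scalar columns takes.
    features = datasets.Features({"text": datasets.Value("string"), "label": datasets.Value("int32")})

    @get_duration
    def write_dummy_dataset(path):
        generate_example_dataset(path, features, num_examples=100)

    print(f"wrote 100 dummy examples in {write_dummy_dataset('/tmp/dummy_bench.arrow'):.4f}s")
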
"""Constraint classes used by constrained beam search during generation."""
from abc import ABC, abstractmethod
from typing import List, Optional


class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test that the subclass is defined consistently
        self.test()

    def test(self):
        """Tests whether this constraint has been properly defined."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()

            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        """Returns the token(s) that would take this constraint one step closer to being fulfilled."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        """Reads in a token and returns whether it creates progress."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        """Reads in a token and returns (stepped, completed, reset) describing the resulting state change."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        """Resets the state of this constraint to its initialization."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        """Returns the number of remaining steps of advance() needed to complete this constraint."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        """Creates a new instance of this constraint, optionally copying its current state."""
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )


class PhrasalConstraint(Constraint):
    """Constraint enforcing that an ordered sequence of tokens appears in the output."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint


class DisjunctiveTrie:
    """A trie over several candidate token sequences, used by DisjunctiveConstraint."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the tokens in `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns whether the number of leaves differs from the number of words; if so, one word is a subset of another."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count


class DisjunctiveConstraint(Constraint):
    """Constraint fulfilled once any one of several candidate token sequences appears in the output."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint


class ConstraintListState:
    """A class for beam scorers to track their progress through a list of constraints."""

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints

        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False

        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()

        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        """The list of tokens to generate such that we can make progress."""
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        """Resets the state of progress, then replays the tokens generated thus far."""
        self.init_state()

        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)

                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` should be an `int`, but is `{token_id}`.")

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* make incremental progress on the
            # current job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #    e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #    But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #    constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None

            if complete:
                # 2. If the next token completes the constraint, move it to the completed list, set
                #    inprogress to None. If there are no pending constraints either, then this full list of
                #    constraints is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None

                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True

        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our
            # list of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)

                    if not stepped:
                        raise Exception(
                            "`constraint.update(token_id)` is not yielding incremental progress, "
                            "even though `constraint.does_advance(token_id)` is true."
                        )

                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None

                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint

                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )

                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True

                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we never actually modify self.constraints objects
        # throughout this process, so the copy starts at its initialization state.

        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]

        return new_state
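
if __name__ == "__main__":
    # Minimal sketch of the constraint state machines (an assumption, not part
    # of the original module; the token ids are arbitrary).
    phrase = PhrasalConstraint([5, 9, 3])
    for token in [5, 9, 3]:
        stepped, completed, reset = phrase.update(token)
    print(phrase.completed, phrase.remaining())  # True 0

    # A disjunctive constraint is satisfied by any one of several sequences.
    either = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
    print(either.advance())  # [1]
    either.update(1)
    print(sorted(either.advance()))  # [2, 4]
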
"""Pytest fixtures for running datasets tests against the Hugging Face Hub CI endpoint."""
import time
from contextlib import contextmanager
from pathlib import Path

import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder


CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()


@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)


@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()


@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)


@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo


@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(
    hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_txt_data_


@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(
    hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url
):
    return hf_private_dataset_repo_zipped_img_data_
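
# Example of how these fixtures compose in a test module collected with this
# conftest (illustrative sketch, not part of the original file; it needs
# network access to the CI hub endpoint):
#
#     def test_load_private_text_repo(hf_private_dataset_repo_txt_data, hf_token):
#         from datasets import load_dataset
#
#         dataset = load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)
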
"""Legacy GLUE/XNLI metrics (deprecated in favor of the 🤗 Evaluate library)."""
import warnings

from ...utils import is_sklearn_available, requires_backends


if is_sklearn_available():
    from scipy.stats import pearsonr, spearmanr
    from sklearn.metrics import f1_score, matthews_corrcoef


DEPRECATION_WARNING = (
    "This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
    "library. You can have a look at this example script for pointers: "
    "https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)


def simple_accuracy(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(simple_accuracy, "sklearn")
    return (preds == labels).mean()


def acc_and_f1(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(acc_and_f1, "sklearn")
    acc = simple_accuracy(preds, labels)
    f1 = f1_score(y_true=labels, y_pred=preds)
    return {
        "acc": acc,
        "f1": f1,
        "acc_and_f1": (acc + f1) / 2,
    }


def pearson_and_spearman(preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(pearson_and_spearman, "sklearn")
    pearson_corr = pearsonr(preds, labels)[0]
    spearman_corr = spearmanr(preds, labels)[0]
    return {
        "pearson": pearson_corr,
        "spearmanr": spearman_corr,
        "corr": (pearson_corr + spearman_corr) / 2,
    }


def glue_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(glue_compute_metrics, "sklearn")
    assert len(preds) == len(labels), f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}"
    if task_name == "cola":
        return {"mcc": matthews_corrcoef(labels, preds)}
    elif task_name == "sst-2":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "mrpc":
        return acc_and_f1(preds, labels)
    elif task_name == "sts-b":
        return pearson_and_spearman(preds, labels)
    elif task_name == "qqp":
        return acc_and_f1(preds, labels)
    elif task_name == "mnli":
        return {"mnli/acc": simple_accuracy(preds, labels)}
    elif task_name == "mnli-mm":
        return {"mnli-mm/acc": simple_accuracy(preds, labels)}
    elif task_name == "qnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "rte":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "wnli":
        return {"acc": simple_accuracy(preds, labels)}
    elif task_name == "hans":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)


def xnli_compute_metrics(task_name, preds, labels):
    warnings.warn(DEPRECATION_WARNING, FutureWarning)
    requires_backends(xnli_compute_metrics, "sklearn")
    if len(preds) != len(labels):
        raise ValueError(f"Predictions and labels have mismatched lengths {len(preds)} and {len(labels)}")
    if task_name == "xnli":
        return {"acc": simple_accuracy(preds, labels)}
    else:
        raise KeyError(task_name)
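
if __name__ == "__main__":
    # Minimal sketch (an assumption, not part of the original module): synthetic
    # predictions and labels; requires scikit-learn to be installed.
    import numpy as np

    preds = np.array([0, 1, 1, 0])
    labels = np.array([0, 1, 0, 0])
    print(glue_compute_metrics("sst-2", preds, labels))  # {'acc': 0.75}
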
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}


class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)
        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)

    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
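

# A minimal usage sketch: instantiate with a custom query count and round-trip
# through ``to_dict`` (the default ResNet backbone is filled in automatically).
if __name__ == "__main__":
    config = DetaConfig(num_queries=300)
    serialized = config.to_dict()
    assert serialized["num_queries"] == 300
    assert serialized["model_type"] == "deta"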
| 13 | 0 |
import torch
def main() -> None:
    # Report how many CUDA devices this process can see.
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"Successfully ran on {num_gpus} GPUs")


if __name__ == "__main__":
    main()
| 700 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        # Nearest-neighbour upsampling by a factor of 2, followed by a 3x3 conv.
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        # A strided 3x3 convolution halves the spatial resolution.
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states


class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )
        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)
        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
        self.conv_shortcut = None
        if use_nin_shortcut:
            # A 1x1 convolution matches channel counts on the residual path.
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the timestep embedding and add it as a per-channel bias.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
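

# A minimal usage sketch (shapes hypothetical): initialise the ResNet block and
# apply it to a dummy NHWC activation plus a timestep embedding.
if __name__ == "__main__":
    block = FlaxResnetBlock2D(in_channels=32, out_channels=64)
    x = jnp.zeros((1, 8, 8, 32))
    temb = jnp.zeros((1, 128))
    params = block.init(jax.random.PRNGKey(0), x, temb)
    y = block.apply(params, x, temb)
    assert y.shape == (1, 8, 8, 64)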
| 13 | 0 |
'''simple docstring'''
def is_sum_subset(arr: list[int], required_sum: int) -> bool:
    """Return True if some subset of ``arr`` sums to ``required_sum``."""
    arr_len = len(arr)
    subset = [[False] * (required_sum + 1) for _ in range(arr_len + 1)]
    # For any prefix of the array, a sum of zero can be formed by taking no
    # elements, hence True.
    for i in range(arr_len + 1):
        subset[i][0] = True
    # A non-zero sum cannot be formed from the empty prefix.
    for i in range(1, required_sum + 1):
        subset[0][i] = False
    for i in range(1, arr_len + 1):
        for j in range(1, required_sum + 1):
            if arr[i - 1] > j:
                subset[i][j] = subset[i - 1][j]
            if arr[i - 1] <= j:
                subset[i][j] = subset[i - 1][j] or subset[i - 1][j - arr[i - 1]]
    return subset[arr_len][required_sum]
if __name__ == "__main__":
import doctest
doctest.testmod()
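

# A quick worked check: {3, 34, 4, 12, 5, 2} has a subset summing to 9
# (4 + 5) but none summing to 30.
if __name__ == "__main__":
    assert is_sum_subset([3, 34, 4, 12, 5, 2], 9)
    assert not is_sum_subset([3, 34, 4, 12, 5, 2], 30)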
| 701 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        """Sift the element at ``start`` down to restore the min-heap property."""
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp2 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp2

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        """Bubble ``val`` up from ``index`` after a key decrease."""
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prisms_algorithm(adjacency_list):
    """Compute a minimum spanning tree with Prim's algorithm."""
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prisms_algorithm(adjacency_list))
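

# An alternative, non-interactive driver (graph hypothetical): a 4-vertex
# weighted graph whose minimum spanning tree is the path 0-1-2-3; call
# _demo() instead of the interactive prompts above.
def _demo() -> None:
    example = defaultdict(list)
    for u, v, w in [(0, 1, 1), (1, 2, 2), (2, 3, 1), (0, 3, 10)]:
        example[u].append([v, w])
        example[v].append([u, w])
    print(prisms_algorithm(example))  # [(0, 1), (1, 2), (2, 3)]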
| 13 | 0 |
'''simple docstring'''
def gray_code(bit_count: int) -> list:
    """Return the Gray code sequence for ``bit_count`` bits as integers."""
    if bit_count < 0:
        raise ValueError("The given input must be positive")

    # get the generated string sequence
    sequence = gray_code_sequence_string(bit_count)

    # convert the bit strings to integers
    for i in range(len(sequence)):
        sequence[i] = int(sequence[i], 2)
    return sequence


def gray_code_sequence_string(bit_count: int) -> list:
    """Recursively build the Gray code sequence as bit strings."""
    if bit_count == 0:
        return ["0"]
    if bit_count == 1:
        return ["0", "1"]

    seq_len = 1 << bit_count  # defines the length of the sequence
    # 1 << n is equivalent to 2^n

    # the recursive call generates the answer for n - 1 bits
    smaller_sequence = gray_code_sequence_string(bit_count - 1)
    sequence = []

    # append 0 to the first half of the smaller sequence generated
    for i in range(seq_len // 2):
        sequence.append("0" + smaller_sequence[i])

    # append 1 to the second half ... start from the end of the list
    for i in reversed(range(seq_len // 2)):
        sequence.append("1" + smaller_sequence[i])
    return sequence
if __name__ == "__main__":
import doctest
doctest.testmod()
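

# A quick check: the 2-bit Gray code visits each value exactly once while
# changing a single bit between neighbours.
if __name__ == "__main__":
    assert gray_code(2) == [0, 1, 3, 2]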
| 702 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8

KEYMAP = {
    "tab": ord("\t"),
    "newline": ord("\r"),
    "esc": 27,
    "up": 65 + ARROW_KEY_FLAG,
    "down": 66 + ARROW_KEY_FLAG,
    "right": 67 + ARROW_KEY_FLAG,
    "left": 68 + ARROW_KEY_FLAG,
    "mod_int": 91,
    "undefined": sys.maxsize,
    "interrupt": 3,
    "insert": 50,
    "delete": 51,
    "pg_up": 53,
    "pg_down": 54,
}

KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]

if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
        b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
        b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
        b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
        b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
        b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
    }

for i in range(10):
    KEYMAP[str(i)] = ord(str(i))


def get_raw_chars():
    """Get raw characters from the keyboard, one keystroke at a time."""
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch


def get_character():
    """Get a single character, translating escape sequences into arrow keys."""
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char
    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()
    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 13 | 0 |
'''simple docstring'''
def sum_of_series(first_term: int, common_diff: int, num_of_terms: int) -> float:
    """Sum of an arithmetic progression."""
    total = (num_of_terms / 2) * (2 * first_term + (num_of_terms - 1) * common_diff)
    # formula for sum of series
    return total


def main() -> None:
    print(sum_of_series(1, 1, 10))  # 1 + 2 + ... + 10 = 55.0
if __name__ == "__main__":
import doctest
doctest.testmod()
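

# A quick cross-check: the closed form (n / 2) * (2a + (n - 1)d) agrees with an
# explicit summation for a = 3, d = 4, n = 7.
if __name__ == "__main__":
    assert sum_of_series(3, 4, 7) == sum(3 + 4 * k for k in range(7))  # 105.0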
| 703 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Leftmost insertion point keeping ``sorted_collection`` sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    """Rightmost insertion point keeping ``sorted_collection`` sorted."""
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    """Iterative binary search; returns the index of ``item`` or None."""
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    """Binary search delegating to the standard library's ``bisect``."""
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    """Recursive binary search over ``sorted_collection[left:right + 1]``."""
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
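

# A non-interactive sanity check (values hypothetical) covering the iterative,
# library, and recursive variants; call _self_test() instead of the prompts above.
def _self_test() -> None:
    data = [0, 5, 7, 10, 15]
    assert binary_search(data, 5) == 1
    assert binary_search(data, 6) is None
    assert binary_search_std_lib(data, 10) == 3
    assert binary_search_by_recursion(data, 15, 0, len(data) - 1) == 4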
| 13 | 0 |
'''simple docstring'''
import warnings
from diffusers import StableDiffusionInpaintPipeline as StableDiffusionInpaintPipeline # noqa F401
warnings.warn(
"The `inpainting.py` script is outdated. Please use directly `from diffusers import"
" StableDiffusionInpaintPipeline` instead."
)
| 704 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
logger = logging.get_logger(__name__)


class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"


def get_constant_schedule(optimizer: Optimizer, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate."""
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)


def get_constant_schedule_with_warmup(optimizer: Optimizer, num_warmup_steps: int, last_epoch: int = -1) -> LambdaLR:
    """Constant learning rate preceded by a linear warmup."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)


def get_piecewise_constant_schedule(optimizer: Optimizer, step_rules: str, last_epoch: int = -1) -> LambdaLR:
    """Piecewise constant learning rate driven by a "multiplier:step" rule string."""
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, steps_str = rule_str.split(":")
        steps = int(steps_str)
        value = float(value_str)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps: int) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)
    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)


def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1) -> LambdaLR:
    """Linear warmup followed by linear decay to zero."""

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: float = 0.5, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup followed by a cosine decay."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer: Optimizer, num_warmup_steps: int, num_training_steps: int, num_cycles: int = 1, last_epoch: int = -1
) -> LambdaLR:
    """Linear warmup followed by a cosine decay with ``num_cycles`` hard restarts."""

    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)


def get_polynomial_decay_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, lr_end=1e-7, power=1.0, last_epoch=-1
) -> LambdaLR:
    """Linear warmup followed by a polynomial decay from the initial lr to ``lr_end``."""
    lr_init = optimizer.defaults["lr"]
    if not (lr_init > lr_end):
        raise ValueError(f"lr_end ({lr_end}) must be smaller than initial lr ({lr_init})")

    def lr_lambda(current_step: int):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        elif current_step > num_training_steps:
            return lr_end / lr_init  # as LambdaLR multiplies by lr_init
        else:
            lr_range = lr_init - lr_end
            decay_steps = num_training_steps - num_warmup_steps
            pct_remaining = 1 - (current_step - num_warmup_steps) / decay_steps
            decay = lr_range * pct_remaining**power + lr_end
            return decay / lr_init  # as LambdaLR multiplies by lr_init

    return LambdaLR(optimizer, lr_lambda, last_epoch)


TYPE_TO_SCHEDULER_FUNCTION = {
    SchedulerType.LINEAR: get_linear_schedule_with_warmup,
    SchedulerType.COSINE: get_cosine_schedule_with_warmup,
    SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
    SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
    SchedulerType.CONSTANT: get_constant_schedule,
    SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
    SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}


def get_scheduler(
    name: Union[str, SchedulerType],
    optimizer: Optimizer,
    step_rules: Optional[str] = None,
    num_warmup_steps: Optional[int] = None,
    num_training_steps: Optional[int] = None,
    num_cycles: int = 1,
    power: float = 1.0,
    last_epoch: int = -1,
) -> LambdaLR:
    """Unified entry point that builds a scheduler from its name."""
    name = SchedulerType(name)
    schedule_func = TYPE_TO_SCHEDULER_FUNCTION[name]
    if name == SchedulerType.CONSTANT:
        return schedule_func(optimizer, last_epoch=last_epoch)

    if name == SchedulerType.PIECEWISE_CONSTANT:
        return schedule_func(optimizer, step_rules=step_rules, last_epoch=last_epoch)

    # All other schedulers require `num_warmup_steps`
    if num_warmup_steps is None:
        raise ValueError(f"{name} requires `num_warmup_steps`, please provide that argument.")

    if name == SchedulerType.CONSTANT_WITH_WARMUP:
        return schedule_func(optimizer, num_warmup_steps=num_warmup_steps, last_epoch=last_epoch)

    # All other schedulers require `num_training_steps`
    if num_training_steps is None:
        raise ValueError(f"{name} requires `num_training_steps`, please provide that argument.")

    if name == SchedulerType.COSINE_WITH_RESTARTS:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            num_cycles=num_cycles,
            last_epoch=last_epoch,
        )

    if name == SchedulerType.POLYNOMIAL:
        return schedule_func(
            optimizer,
            num_warmup_steps=num_warmup_steps,
            num_training_steps=num_training_steps,
            power=power,
            last_epoch=last_epoch,
        )

    return schedule_func(
        optimizer, num_warmup_steps=num_warmup_steps, num_training_steps=num_training_steps, last_epoch=last_epoch
    )
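

# A minimal usage sketch (parameter values hypothetical): a linear schedule
# with warmup driving a vanilla SGD optimizer.
if __name__ == "__main__":
    import torch

    params = [torch.nn.Parameter(torch.zeros(1))]
    opt = torch.optim.SGD(params, lr=0.1)
    sched = get_scheduler("linear", opt, num_warmup_steps=10, num_training_steps=100)
    for _ in range(5):
        opt.step()
        sched.step()
    print(sched.get_last_lr())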
| 13 | 0 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
logger = logging.getLogger(__name__)


def dummy_dataloaders(a=2, b=3, batch_size=16, n_train_batches: int = 10, n_valid_batches: int = 2):
    """Generate a pair of dummy DataLoaders over noisy samples of y = a * x + b."""

    def get_dataset(n_batches):
        x = torch.randn(batch_size * n_batches, 1)
        return TensorDataset(x, a * x + b + 0.1 * torch.randn(batch_size * n_batches, 1))

    train_dataset = get_dataset(n_train_batches)
    valid_dataset = get_dataset(n_valid_batches)
    train_dataloader = DataLoader(train_dataset, shuffle=True, batch_size=batch_size, num_workers=4)
    valid_dataloader = DataLoader(valid_dataset, shuffle=False, batch_size=batch_size, num_workers=4)
    return (train_dataloader, valid_dataloader)


def train(num_epochs, model, dataloader, optimizer, accelerator, scheduler=None):
    """Train for ``num_epochs`` and return the random numbers drawn per epoch."""
    rands = []
    for epoch in range(num_epochs):
        # Train quickly
        model.train()
        for batch in dataloader:
            x, y = batch
            outputs = model(x)
            loss = torch.nn.functional.mse_loss(outputs, y)
            accelerator.backward(loss)
            optimizer.step()
            optimizer.zero_grad()
        rands.append(random.random())  # Introduce some randomness
        if scheduler is not None:
            scheduler.step()
    return rands


class DummyModel(nn.Module):
    """Simple model computing y = a * x + b."""

    def __init__(self):
        super().__init__()
        self.a = nn.Parameter(torch.randn(1))
        self.b = nn.Parameter(torch.randn(1))

    def forward(self, x):
        return x * self.a + self.b


class CheckpointTest(unittest.TestCase):
    def test_with_save_limit(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(total_limit=1, project_dir=tmpdir, automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()

            # Save second state
            accelerator.save_state()
            self.assertEqual(len(os.listdir(accelerator.project_dir)), 1)

    def test_can_resume_training_with_folder(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            # Train baseline
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            initial = os.path.join(tmpdir, "initial")
            accelerator.save_state(initial)
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            accelerator = Accelerator()
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(initial)
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            checkpoint = os.path.join(tmpdir, "checkpoint")
            accelerator.save_state(checkpoint)

            # Load everything back in and make sure all states work
            accelerator.load_state(checkpoint)
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_can_resume_training(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            # Save initial
            accelerator.save_state()
            (a, b) = model.a.item(), model.b.item()
            opt_state = optimizer.state_dict()
            ground_truth_rands = train(3, model, train_dataloader, optimizer, accelerator)
            (a1, b1) = model.a.item(), model.b.item()
            opt_state1 = optimizer.state_dict()

            # Train partially
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(iteration=1, automatic_checkpoint_naming=True)
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader
            )
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            (a2, b2) = model.a.item(), model.b.item()
            opt_state2 = optimizer.state_dict()
            self.assertEqual(a, a2)
            self.assertEqual(b, b2)
            self.assertEqual(opt_state, opt_state2)

            test_rands = train(2, model, train_dataloader, optimizer, accelerator)
            # Save everything
            accelerator.save_state()

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_1"))
            test_rands += train(1, model, train_dataloader, optimizer, accelerator)
            (a3, b3) = model.a.item(), model.b.item()
            opt_state3 = optimizer.state_dict()
            self.assertEqual(a1, a3)
            self.assertEqual(b1, b3)
            self.assertEqual(opt_state1, opt_state3)
            self.assertEqual(ground_truth_rands, test_rands)

    def test_invalid_registration(self):
        t = torch.tensor([1, 2, 3])
        t1 = torch.tensor([2, 3, 4])
        net = DummyModel()
        opt = torch.optim.Adam(net.parameters())
        accelerator = Accelerator()
        with self.assertRaises(ValueError) as ve:
            accelerator.register_for_checkpointing(t, t1, net, opt)
        message = str(ve.exception)
        self.assertTrue("Item at index 0" in message)
        self.assertTrue("Item at index 1" in message)
        self.assertFalse("Item at index 2" in message)
        self.assertFalse("Item at index 3" in message)

    def test_with_scheduler(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
            scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
            train_dataloader, valid_dataloader = dummy_dataloaders()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
                model, optimizer, train_dataloader, valid_dataloader, scheduler
            )
            # Save initial
            accelerator.save_state()
            scheduler_state = scheduler.state_dict()
            train(3, model, train_dataloader, optimizer, accelerator, scheduler)
            self.assertNotEqual(scheduler_state, scheduler.state_dict())

            # Load everything back in and make sure all states work
            accelerator.load_state(os.path.join(tmpdir, "checkpoints", "checkpoint_0"))
            self.assertEqual(scheduler_state, scheduler.state_dict())

    def test_checkpoint_deletion(self):
        with tempfile.TemporaryDirectory() as tmpdir:
            set_seed(42)
            model = DummyModel()
            project_config = ProjectConfiguration(automatic_checkpoint_naming=True, total_limit=2)
            # Train baseline
            accelerator = Accelerator(project_dir=tmpdir, project_config=project_config)
            model = accelerator.prepare(model)
            # Save 3 states:
            for _ in range(11):
                accelerator.save_state()
            self.assertTrue(not os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_0")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_9")))
            self.assertTrue(os.path.exists(os.path.join(tmpdir, "checkpoints", "checkpoint_10")))

    @require_cuda
    def test_map_location(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        execute_subprocess_async(cmd, env=os.environ.copy())


if __name__ == "__main__":
    savedir = "/tmp/accelerate/state_checkpointing"
    model = DummyModel()
    optimizer = torch.optim.Adam(params=model.parameters(), lr=1e-3)
    scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.99)
    train_dataloader, valid_dataloader = dummy_dataloaders()
    project_config = ProjectConfiguration(automatic_checkpoint_naming=True)
    # Train baseline
    accelerator = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision="no")
    if accelerator.process_index == 0:
        if os.path.exists(savedir):
            shutil.rmtree(savedir)
        os.makedirs(savedir)
    model, optimizer, train_dataloader, valid_dataloader, scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, valid_dataloader, scheduler
    )
    model, optimizer = accelerator.prepare(model, optimizer)
    train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert param_device.type == accelerator.device.type
    model = model.cpu()
    accelerator.wait_for_everyone()
    accelerator.save_state()
    accelerator.wait_for_everyone()

    # Check CPU state
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="cpu")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == torch.device("cpu").type
    ), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"

    # Check device state
    model.to(accelerator.device)
    accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="on_device")
    for group in optimizer.param_groups:
        param_device = group["params"][0].device
        break
    assert (
        param_device.type == accelerator.device.type
    ), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"

    # Check error
    with pytest.raises(TypeError, match="Unsupported optimizer map location passed"):
        accelerator.load_state(os.path.join(savedir, "checkpoints", "checkpoint_0"), map_location="invalid")
    accelerator.wait_for_everyone()
    if accelerator.process_index == 0:
        shutil.rmtree(savedir)
    accelerator.wait_for_everyone()
| 705 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 | 0 |
'''simple docstring'''
import math
def decimal_to_octal(num: int) -> str:
    """Convert a decimal number to its octal string, e.g. 65 -> "0o101"."""
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10, counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return f"0o{int(octal)}"


def main() -> None:
    """Print octal equivalents of common decimal numbers."""
    print("\n2 in octal is:")
    print(decimal_to_octal(2))  # = 2
    print("\n8 in octal is:")
    print(decimal_to_octal(8))  # = 10
    print("\n65 in octal is:")
    print(decimal_to_octal(65))  # = 101
    print("\n216 in octal is:")
    print(decimal_to_octal(216))  # = 330
    print("\n512 in octal is:")
    print(decimal_to_octal(512))  # = 1000
    print("\n")
if __name__ == "__main__":
main()
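

# A quick cross-check against Python's built-in oct().
if __name__ == "__main__":
    for n in (2, 8, 65, 216, 512):
        assert decimal_to_octal(n) == oct(n)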
| 706 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current share price for ``symbol`` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 13 | 0 |
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Image
from .base import TaskTemplate
@dataclass(frozen=True)
class ImageClassification(TaskTemplate):
    task: str = field(default="image-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"image": Image()})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    image_column: str = "image"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        # The dataclass is frozen, so the attribute is set through __dict__.
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.image_column: "image",
            self.label_column: "labels",
        }
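

# A minimal usage sketch (feature names hypothetical; this module normally runs
# inside the datasets package): align the template with a cat/dog dataset.
if __name__ == "__main__":
    features = Features({"image": Image(), "labels": ClassLabel(names=["cat", "dog"])})
    task = ImageClassification().align_with_features(features)
    print(task.label_schema)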
| 707 |
'''simple docstring'''
def is_palindrome(num: int) -> bool:
    """Return True if ``num`` reads the same backwards, e.g. 121."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
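

# A quick check of both branches, including the negative-number case.
if __name__ == "__main__":
    assert is_palindrome(121)
    assert not is_palindrome(123)
    assert not is_palindrome(-121)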
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import inspect
import unittest
import numpy as np
from transformers import DeiTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFDeiTForImageClassification,
TFDeiTForImageClassificationWithTeacher,
TFDeiTForMaskedImageModeling,
TFDeiTModel,
)
from transformers.models.deit.modeling_tf_deit import TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class TFDeiTModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        encoder_stride=2,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.encoder_stride = encoder_stride

        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 2

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return DeiTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            encoder_stride=self.encoder_stride,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = TFDeiTModel(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = TFDeiTForMaskedImageModeling(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForMaskedImageModeling(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = TFDeiTForImageClassification(config)
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = TFDeiTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_tf
class TFDeiTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFDeiTModel,
            TFDeiTForImageClassification,
            TFDeiTForImageClassificationWithTeacher,
            TFDeiTForMaskedImageModeling,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFDeiTModel,
            "image-classification": (TFDeiTForImageClassification, TFDeiTForImageClassificationWithTeacher),
        }
        if is_tf_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFDeiTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DeiTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DeiT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (tf.keras.layers.Layer))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, tf.keras.layers.Dense))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.call)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if "labels" in inputs_dict and "labels" not in inspect.signature(model_class.call).parameters:
                del inputs_dict["labels"]
        return inputs_dict

    @slow
    def test_model_from_pretrained(self):
        for model_name in TF_DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = TFDeiTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_tf
@require_vision
class DeiTModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return (
            DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")
            if is_vision_available()
            else None
        )

    @slow
    def test_inference_image_classification_head(self):
        model = TFDeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224")

        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="tf")

        # forward pass
        outputs = model(**inputs)

        # verify the logits
        expected_shape = tf.TensorShape((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)

        expected_slice = tf.constant([-1.0266, 0.1912, -1.2861])
        self.assertTrue(np.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a = list[list[float | int]]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for row in range(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = matrix[row][col]
__SCREAMING_SNAKE_CASE = vector[row][0]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while row < size and col < size:
# pivoting
__SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col]
__SCREAMING_SNAKE_CASE = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __UpperCAmelCase ):
for row in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col]
for cola in range(__UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase )
]
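# Editor's sketch (assumption: NumPy available) showing what `solve` above is
# meant to compute -- Gaussian elimination with partial pivoting followed by
# back substitution. The system x + y = 3, 2x + y = 4 has solution x=1, y=2.
import numpy as np

_check = np.linalg.solve(np.array([[1.0, 1.0], [2.0, 1.0]]), np.array([3.0, 4.0]))
assert np.allclose(_check, [1.0, 2.0])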
def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for x_val, y_val in enumerate(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1)
__SCREAMING_SNAKE_CASE = y_val
__SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase )
def interpolated_func(__UpperCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__UpperCAmelCase ) )
return interpolated_func
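# Editor's sketch (assumption: NumPy available): `interpolate` above fits a
# degree-(n-1) polynomial through the points (1, y_1), ..., (n, y_n) via a
# Vandermonde system; np.polyfit does the same and gives a quick cross-check.
# Fitting a quadratic through the first three cubes 1, 8, 27 yields
# 6x^2 - 11x + 6, whose first incorrect term appears at x = 4 (58, not 64).
import numpy as np

_coeffs = np.polyfit([1, 2, 3], [1, 8, 27], deg=2)
assert np.allclose(np.rint(_coeffs), [6.0, -11.0, 6.0])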
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [func(x_val ) for x_val in range(1 , order + 1 )]
__SCREAMING_SNAKE_CASE = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for poly in polynomials:
__SCREAMING_SNAKE_CASE = 1
while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ):
x_val += 1
ret += poly(__UpperCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 13 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
'asapp/sew-d-tiny-100k': 'https://huggingface.co/asapp/sew-d-tiny-100k/resolve/main/config.json',
# See all SEW-D models at https://huggingface.co/models?filter=sew-d
}
class __a ( _A ):
__UpperCamelCase : Union[str, Any] = 'sew-d'
def __init__( self : List[str] ,lowerCamelCase : Tuple=32 ,lowerCamelCase : Optional[int]=768 ,lowerCamelCase : Tuple=12 ,lowerCamelCase : Optional[Any]=12 ,lowerCamelCase : int=3072 ,lowerCamelCase : Tuple=2 ,lowerCamelCase : List[Any]=512 ,lowerCamelCase : Any=256 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : Dict=True ,lowerCamelCase : str=("p2c", "c2p") ,lowerCamelCase : List[Any]="layer_norm" ,lowerCamelCase : int="gelu_python" ,lowerCamelCase : Optional[int]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : int=0.0 ,lowerCamelCase : Dict=0.1 ,lowerCamelCase : List[Any]=0.02 ,lowerCamelCase : Optional[int]=1E-7 ,lowerCamelCase : List[Any]=1E-5 ,lowerCamelCase : List[str]="group" ,lowerCamelCase : Optional[int]="gelu" ,lowerCamelCase : Tuple=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : str=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[Any]=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[Any]=False ,lowerCamelCase : Optional[int]=128 ,lowerCamelCase : Tuple=16 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : Dict=0.05 ,lowerCamelCase : str=10 ,lowerCamelCase : Tuple=2 ,lowerCamelCase : Dict=0.0 ,lowerCamelCase : Dict=10 ,lowerCamelCase : Union[str, Any]=0 ,lowerCamelCase : List[Any]="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : List[str]=0 ,lowerCamelCase : Union[str, Any]=1 ,lowerCamelCase : List[Any]=2 ,**lowerCamelCase : str ,):
'''simple docstring'''
super().__init__(**UpperCamelCase__ ,pad_token_id=UpperCamelCase__ ,bos_token_id=UpperCamelCase__ ,eos_token_id=UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = list(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = list(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = squeeze_factor
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = position_buckets
__SCREAMING_SNAKE_CASE = share_att_key
__SCREAMING_SNAKE_CASE = relative_attention
__SCREAMING_SNAKE_CASE = norm_rel_ebd
__SCREAMING_SNAKE_CASE = list(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = feature_layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
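# Editor's note: the property above is the total temporal downsampling of the
# feature encoder, i.e. the product of the conv strides. For the default
# strides in this config it works out to 320 input samples per output frame:
import functools
import operator

assert functools.reduce(operator.mul, (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), 1) == 320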
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class __a ( _snake_case ):
def __init__( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(x ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda x : -x[0] )
]
return result
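# Editor's usage sketch for the pipeline above. Assumptions: the public
# `openai/clip-vit-base-patch32` checkpoint, an illustrative COCO image URL,
# and arbitrary example labels; network access is required to run it.
from transformers import pipeline

classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
predictions = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["a photo of two cats", "a photo of a dog"],
)
print(predictions)  # list of {"score": float, "label": str}, sorted by descending score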
| 13 | 0 |
'''simple docstring'''
import logging
import os
from typing import List, Tuple
import numpy as np
import psutil
import torch
import torch.distributed as dist
from transformers import RagRetriever
a = logging.getLogger(__name__)
class __a ( snake_case__ ):
def __init__( self : str ,lowerCamelCase : List[str] ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[int] ,lowerCamelCase : Optional[Any]=None ):
'''simple docstring'''
super().__init__(
_A ,question_encoder_tokenizer=_A ,generator_tokenizer=_A ,index=_A ,init_retrieval=_A ,)
__SCREAMING_SNAKE_CASE = None
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[Any] ):
'''simple docstring'''
logger.info("""initializing retrieval""" )
# initializing a separate process group for retrieval as the default
# nccl backend doesn't support gather/scatter operations while gloo
# is too slow to replace nccl for the core gpu communication
if dist.is_initialized():
logger.info("""dist initialized""" )
# needs to be set manually
__SCREAMING_SNAKE_CASE = self._infer_socket_ifname()
# avoid clash with the NCCL port
__SCREAMING_SNAKE_CASE = str(distributed_port + 1 )
__SCREAMING_SNAKE_CASE = dist.new_group(ranks=_A ,backend="""gloo""" )
# initialize retriever only on the main worker
if not dist.is_initialized() or self._is_main():
logger.info("""dist not initialized / main""" )
self.index.init_index()
# all processes wait untill the retriever is initialized by the main process
if dist.is_initialized():
torch.distributed.barrier(group=self.process_group )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return dist.get_rank(group=self.process_group ) == 0
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[int]=torch.floataa ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = torch.empty(_A ,dtype=_A )
dist.scatter(_A ,src=0 ,scatter_list=_A ,group=self.process_group )
return target_tensor
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = psutil.net_if_addrs()
# a hacky way to deal with varying network interface names
__SCREAMING_SNAKE_CASE = next((addr for addr in addrs if addr.startswith("""e""" )) ,_A )
return ifname
def UpperCAmelCase__ ( self : str ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
if not dist.is_initialized():
__SCREAMING_SNAKE_CASE = self._main_retrieve(_A ,_A )
return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(_A )
# distributed training
__SCREAMING_SNAKE_CASE = dist.get_world_size(group=self.process_group )
# gather logic
__SCREAMING_SNAKE_CASE = None
if self._is_main():
__SCREAMING_SNAKE_CASE = [torch.empty(question_hidden_states.shape ,dtype=torch.floataa ) for _ in range(_A )]
dist.gather(torch.tensor(_A ) ,dst=0 ,gather_list=_A ,group=self.process_group )
# scatter logic
__SCREAMING_SNAKE_CASE = question_hidden_states.shape[0]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
if self._is_main():
assert len(_A ) == world_size
__SCREAMING_SNAKE_CASE = self._main_retrieve(torch.cat(_A ).numpy() ,_A )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = torch.tensor(_A ), torch.tensor(_A )
__SCREAMING_SNAKE_CASE = self._chunk_tensor(_A ,_A )
__SCREAMING_SNAKE_CASE = self._chunk_tensor(_A ,_A )
__SCREAMING_SNAKE_CASE = self._scattered(_A ,[n_queries, n_docs] ,target_type=torch.intaa )
__SCREAMING_SNAKE_CASE = self._scattered(_A ,[n_queries, n_docs, question_hidden_states.shape[1]] )
return retrieved_doc_embeds.numpy(), doc_ids.numpy(), self.index.get_doc_dicts(_A )
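# Editor's sketch of the gather/scatter pattern used above, written as a
# single-process "gloo" world so it can run without a launcher (assumptions:
# the loopback address and port are illustrative and must be free; real use
# spans multiple ranks, with rank 0 acting as the main retriever).
import torch
import torch.distributed as dist

def _demo_gather_scatter() -> None:
    dist.init_process_group("gloo", init_method="tcp://127.0.0.1:29501", rank=0, world_size=1)
    query = torch.randn(2, 4)
    buffers = [torch.empty_like(query)]
    dist.gather(query, gather_list=buffers, dst=0)        # every rank -> main rank
    out = torch.empty_like(query)
    dist.scatter(out, scatter_list=[buffers[0]], src=0)   # main rank -> every rank
    assert torch.equal(out, query)
    dist.destroy_process_group()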
| 710 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
require_version(deps[pkg] , __UpperCAmelCase )
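# Editor's self-contained sketch of the core comparison behind
# require_version (assumptions: the `packaging` dependency listed above is
# installed; the "20.0" minimum is an arbitrary example, not a real pin).
from importlib.metadata import version as _installed_version
from packaging.version import Version

def _check_min(pkg: str, minimum: str) -> None:
    got = Version(_installed_version(pkg))
    if got < Version(minimum):
        raise ImportError(f"{pkg}>={minimum} is required, but found {got}")

_check_min("packaging", "20.0")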
| 13 | 0 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available() and is_transformers_version(">=", "4.25.0")):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import UnCLIPImageVariationPipeline, UnCLIPPipeline
else:
from .pipeline_unclip import UnCLIPPipeline
from .pipeline_unclip_image_variation import UnCLIPImageVariationPipeline
from .text_proj import UnCLIPTextProjModel
| 711 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
a = logging.getLogger(__name__)
@dataclass
class __a :
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
__UpperCamelCase : int = field(
default=1024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __a :
__UpperCamelCase : str = field(
default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
__UpperCamelCase : str = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
def __magic_name__ ( ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1]
__SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__SCREAMING_SNAKE_CASE = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__SCREAMING_SNAKE_CASE = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1}
__SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__UpperCAmelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__SCREAMING_SNAKE_CASE = examples["""statement"""]
__SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE = raw_datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 )
else:
__SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
__SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
__SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
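# Editor's note on the TabFact table encoding handled by
# `_convert_table_text_to_pandas` above: rows are newline-separated, cells
# are '#'-separated, and the first row is the header (the sample table below
# is illustrative).
import pandas as pd

_table_text = "year#city\n2018#london\n2019#paris"
_content = [row.split("#") for row in _table_text.strip("\n").split("\n")]
_table = pd.DataFrame.from_records(_content[1:], columns=_content[0])
assert list(_table.columns) == ["year", "city"] and len(_table) == 2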
| 13 | 0 |
'''simple docstring'''
import warnings
from ...utils import is_sklearn_available, requires_backends
if is_sklearn_available():
from scipy.stats import pearsonr, spearmanr
from sklearn.metrics import fa_score, matthews_corrcoef
a = (
"This metric will be removed from the library soon, metrics should be handled with the 🤗 Evaluate "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py"
)
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , """sklearn""" )
return (preds == labels).mean()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , """sklearn""" )
__SCREAMING_SNAKE_CASE = simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )
__SCREAMING_SNAKE_CASE = fa_score(y_true=lowerCamelCase_ , y_pred=lowerCamelCase_ )
return {
"acc": acc,
"f1": fa,
"acc_and_f1": (acc + fa) / 2,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , """sklearn""" )
__SCREAMING_SNAKE_CASE = pearsonr(lowerCamelCase_ , lowerCamelCase_ )[0]
__SCREAMING_SNAKE_CASE = spearmanr(lowerCamelCase_ , lowerCamelCase_ )[0]
return {
"pearson": pearson_corr,
"spearmanr": spearman_corr,
"corr": (pearson_corr + spearman_corr) / 2,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , """sklearn""" )
assert len(lowerCamelCase_ ) == len(lowerCamelCase_ ), f"""Predictions and labels have mismatched lengths {len(lowerCamelCase_ )} and {len(lowerCamelCase_ )}"""
if task_name == "cola":
return {"mcc": matthews_corrcoef(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "sst-2":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "mrpc":
return acc_and_fa(lowerCamelCase_ , lowerCamelCase_ )
elif task_name == "sts-b":
return pearson_and_spearman(lowerCamelCase_ , lowerCamelCase_ )
elif task_name == "qqp":
return acc_and_fa(lowerCamelCase_ , lowerCamelCase_ )
elif task_name == "mnli":
return {"mnli/acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "mnli-mm":
return {"mnli-mm/acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "qnli":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "rte":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "wnli":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
elif task_name == "hans":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
else:
raise KeyError(lowerCamelCase_ )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
warnings.warn(lowerCamelCase_ , lowerCamelCase_ )
requires_backends(lowerCamelCase_ , """sklearn""" )
if len(lowerCamelCase_ ) != len(lowerCamelCase_ ):
raise ValueError(f"""Predictions and labels have mismatched lengths {len(lowerCamelCase_ )} and {len(lowerCamelCase_ )}""" )
if task_name == "xnli":
return {"acc": simple_accuracy(lowerCamelCase_ , lowerCamelCase_ )}
else:
raise KeyError(lowerCamelCase_ )
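# Editor's usage sketch (the predictions and labels are made-up): each task
# above reduces to element-wise comparisons on aligned prediction/label
# arrays, e.g. simple accuracy is just the mean of the matches.
import numpy as np

_preds = np.array([1, 0, 1, 1])
_labels = np.array([1, 0, 0, 1])
assert (_preds == _labels).mean() == 0.75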
| 712 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13 | 0 |
'''simple docstring'''
import itertools
import random
import unittest
import numpy as np
from transformers import is_speech_available
from transformers.testing_utils import require_torch, require_torchaudio
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import SpeechaTextFeatureExtractor
a = random.Random()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Union[str, Any]:
'''simple docstring'''
if rng is None:
__SCREAMING_SNAKE_CASE = global_rng
__SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class __a ( unittest.TestCase ):
def __init__( self : Dict ,lowerCamelCase : str ,lowerCamelCase : Optional[int]=7 ,lowerCamelCase : Dict=400 ,lowerCamelCase : Dict=2000 ,lowerCamelCase : str=24 ,lowerCamelCase : Any=24 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Optional[int]=1_6000 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = min_seq_length
__SCREAMING_SNAKE_CASE = max_seq_length
__SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__SCREAMING_SNAKE_CASE = feature_size
__SCREAMING_SNAKE_CASE = num_mel_bins
__SCREAMING_SNAKE_CASE = padding_value
__SCREAMING_SNAKE_CASE = sampling_rate
__SCREAMING_SNAKE_CASE = return_attention_mask
__SCREAMING_SNAKE_CASE = do_normalize
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return {
"feature_size": self.feature_size,
"num_mel_bins": self.num_mel_bins,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[Any]=False ,lowerCamelCase : Optional[Any]=False ):
'''simple docstring'''
def _flatten(lowerCamelCase : Any ):
return list(itertools.chain(*UpperCamelCase_ ) )
if equal_length:
__SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__SCREAMING_SNAKE_CASE = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
__SCREAMING_SNAKE_CASE = [np.asarray(UpperCamelCase_ ) for x in speech_inputs]
return speech_inputs
@require_torch
@require_torchaudio
class __a ( __lowerCamelCase, unittest.TestCase ):
__UpperCamelCase : Any = SpeechaTextFeatureExtractor if is_speech_available() else None
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SpeechaTextFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : Dict ):
'''simple docstring'''
self.assertTrue(np.all(np.mean(UpperCamelCase_ ,axis=0 ) < 1E-3 ) )
self.assertTrue(np.all(np.abs(np.var(UpperCamelCase_ ,axis=0 ) - 1 ) < 1E-3 ) )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
# create three inputs of length 800, 1000, and 1200
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
__SCREAMING_SNAKE_CASE = [np.asarray(UpperCamelCase_ ) for speech_input in speech_inputs]
# Test feature size
__SCREAMING_SNAKE_CASE = feature_extractor(UpperCamelCase_ ,padding=UpperCamelCase_ ,return_tensors="""np""" ).input_features
self.assertTrue(input_features.ndim == 3 )
self.assertTrue(input_features.shape[-1] == feature_extractor.feature_size )
# Test not batched input
__SCREAMING_SNAKE_CASE = feature_extractor(speech_inputs[0] ,return_tensors="""np""" ).input_features
__SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ).input_features
self.assertTrue(np.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1E-3 ) )
# Test batched
__SCREAMING_SNAKE_CASE = feature_extractor(UpperCamelCase_ ,return_tensors="""np""" ).input_features
__SCREAMING_SNAKE_CASE = feature_extractor(UpperCamelCase_ ,return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ ,UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1E-3 ) )
# Test 2-D numpy arrays are batched.
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__SCREAMING_SNAKE_CASE = np.asarray(UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = feature_extractor(UpperCamelCase_ ,return_tensors="""np""" ).input_features
__SCREAMING_SNAKE_CASE = feature_extractor(UpperCamelCase_ ,return_tensors="""np""" ).input_features
for enc_seq_a, enc_seq_a in zip(UpperCamelCase_ ,UpperCamelCase_ ):
self.assertTrue(np.allclose(UpperCamelCase_ ,UpperCamelCase_ ,atol=1E-3 ) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
__SCREAMING_SNAKE_CASE = ["""longest""", """max_length""", """do_not_pad"""]
__SCREAMING_SNAKE_CASE = [None, 16, None]
for max_length, padding in zip(UpperCamelCase_ ,UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = feature_extractor(
UpperCamelCase_ ,padding=UpperCamelCase_ ,max_length=UpperCamelCase_ ,return_attention_mask=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = inputs.input_features
__SCREAMING_SNAKE_CASE = inputs.attention_mask
__SCREAMING_SNAKE_CASE = [np.sum(UpperCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
__SCREAMING_SNAKE_CASE = ["""longest""", """max_length""", """do_not_pad"""]
__SCREAMING_SNAKE_CASE = [None, 16, None]
for max_length, padding in zip(UpperCamelCase_ ,UpperCamelCase_ ):
__SCREAMING_SNAKE_CASE = feature_extractor(
UpperCamelCase_ ,max_length=UpperCamelCase_ ,padding=UpperCamelCase_ ,return_tensors="""np""" ,return_attention_mask=UpperCamelCase_ )
__SCREAMING_SNAKE_CASE = inputs.input_features
__SCREAMING_SNAKE_CASE = inputs.attention_mask
__SCREAMING_SNAKE_CASE = [np.sum(UpperCamelCase_ ) for x in attention_mask]
self._check_zero_mean_unit_variance(input_features[0][: fbank_feat_lengths[0]] )
self.assertTrue(input_features[0][fbank_feat_lengths[0] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[1][: fbank_feat_lengths[1]] )
self.assertTrue(input_features[1][fbank_feat_lengths[1] :].sum() < 1E-6 )
self._check_zero_mean_unit_variance(input_features[2][: fbank_feat_lengths[2]] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
__SCREAMING_SNAKE_CASE = feature_extractor(
UpperCamelCase_ ,padding="""max_length""" ,max_length=4 ,truncation=UpperCamelCase_ ,return_tensors="""np""" ,return_attention_mask=UpperCamelCase_ ,)
__SCREAMING_SNAKE_CASE = inputs.input_features
__SCREAMING_SNAKE_CASE = inputs.attention_mask
__SCREAMING_SNAKE_CASE = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1] )
self._check_zero_mean_unit_variance(input_features[2] )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
__SCREAMING_SNAKE_CASE = feature_extractor(
UpperCamelCase_ ,padding="""longest""" ,max_length=4 ,truncation=UpperCamelCase_ ,return_tensors="""np""" ,return_attention_mask=UpperCamelCase_ ,)
__SCREAMING_SNAKE_CASE = inputs.input_features
__SCREAMING_SNAKE_CASE = inputs.attention_mask
__SCREAMING_SNAKE_CASE = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length < longest -> then pad to max_length
self.assertEqual(input_features.shape ,(3, 4, 24) )
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
__SCREAMING_SNAKE_CASE = feature_extractor(
UpperCamelCase_ ,padding="""longest""" ,max_length=16 ,truncation=UpperCamelCase_ ,return_tensors="""np""" ,return_attention_mask=UpperCamelCase_ ,)
__SCREAMING_SNAKE_CASE = inputs.input_features
__SCREAMING_SNAKE_CASE = inputs.attention_mask
__SCREAMING_SNAKE_CASE = np.sum(attention_mask == 1 ,axis=1 )
self._check_zero_mean_unit_variance(input_features[0, : fbank_feat_lengths[0]] )
self._check_zero_mean_unit_variance(input_features[1, : fbank_feat_lengths[1]] )
self._check_zero_mean_unit_variance(input_features[2] )
# make sure that if max_length > longest -> then pad to longest
self.assertEqual(input_features.shape ,(3, 6, 24) )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
import torch
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = np.random.rand(100 ,32 ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = np_speech_inputs.tolist()
for inputs in [py_speech_inputs, np_speech_inputs]:
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""np""" )
self.assertTrue(np_processed.input_features.dtype == np.floataa )
__SCREAMING_SNAKE_CASE = feature_extractor.pad([{"""input_features""": inputs}] ,return_tensors="""pt""" )
self.assertTrue(pt_processed.input_features.dtype == torch.floataa )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
from datasets import load_dataset
__SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
# automatic decoding with librispeech
__SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(UpperCamelCase_ ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
# fmt: off
__SCREAMING_SNAKE_CASE = np.array([
-1.5_745, -1.7_713, -1.7_020, -1.6_069, -1.2_250, -1.1_105, -0.9_072, -0.8_241,
-1.2_310, -0.8_098, -0.3_320, -0.4_101, -0.7_985, -0.4_996, -0.8_213, -0.9_128,
-1.0_420, -1.1_286, -1.0_440, -0.7_999, -0.8_405, -1.2_275, -1.5_443, -1.4_625,
] )
# fmt: on
__SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict() )
__SCREAMING_SNAKE_CASE = feature_extractor(UpperCamelCase_ ,return_tensors="""pt""" ).input_features
self.assertEqual(input_features.shape ,(1, 584, 24) )
self.assertTrue(np.allclose(input_features[0, 0, :30] ,UpperCamelCase_ ,atol=1E-4 ) )
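# Editor's sketch of the utterance-level normalization these tests assert on
# (zero mean and unit variance per feature over time), independent of the
# feature extractor class; the random matrix is illustrative.
import numpy as np

_feats = np.random.randn(100, 24) * 3.0 + 1.5
_norm = (_feats - _feats.mean(axis=0)) / np.sqrt(_feats.var(axis=0) + 1e-7)
assert abs(_norm.mean()) < 1e-3 and abs(_norm.var() - 1.0) < 1e-3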
| 713 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase , params=__UpperCAmelCase ).content , """html.parser""" )
__SCREAMING_SNAKE_CASE = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
__SCREAMING_SNAKE_CASE = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
if __name__ == "__main__":
a = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 13 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import (
is_pt_tf_cross_test,
require_tf,
require_torch,
require_torchvision,
require_vision,
)
from transformers.utils import is_tf_available, is_torch_available, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AutoProcessor, SamImageProcessor, SamProcessor
if is_torch_available():
import torch
if is_tf_available():
import tensorflow as tf
@require_vision
@require_torchvision
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = SamImageProcessor()
__SCREAMING_SNAKE_CASE = SamProcessor(lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).image_processor
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCamelCase ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=lowerCamelCase ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop original_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [torch.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE = [[1764, 2646]]
__SCREAMING_SNAKE_CASE = [[683, 1024]]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
lowerCamelCase ,torch.tensor(lowerCamelCase ) ,torch.tensor(lowerCamelCase ) )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
# should also work with np
__SCREAMING_SNAKE_CASE = [np.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(lowerCamelCase ,np.array(lowerCamelCase ) ,np.array(lowerCamelCase ) )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
__SCREAMING_SNAKE_CASE = [[1, 0], [0, 1]]
with self.assertRaises(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = processor.post_process_masks(lowerCamelCase ,np.array(lowerCamelCase ) ,np.array(lowerCamelCase ) )
@require_vision
@require_tf
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = SamImageProcessor()
__SCREAMING_SNAKE_CASE = SamProcessor(lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Union[str, Any] ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).image_processor
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCamelCase ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE = SamProcessor.from_pretrained(self.tmpdirname ,do_normalize=lowerCamelCase ,padding_value=1.0 )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""np""" )
input_feat_extract.pop("""original_sizes""" ) # pop original_sizes as it is popped in the processor
input_feat_extract.pop("""reshaped_input_sizes""" ) # pop reshaped_input_sizes as it is popped in the processor
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
@require_tf
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [tf.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE = [[1764, 2646]]
__SCREAMING_SNAKE_CASE = [[683, 1024]]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,return_tensors="""tf""" )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
lowerCamelCase ,tf.convert_to_tensor(lowerCamelCase ) ,tf.convert_to_tensor(lowerCamelCase ) ,return_tensors="""tf""" ,)
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
# should also work with np
__SCREAMING_SNAKE_CASE = [np.ones((1, 3, 5, 5) )]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
lowerCamelCase ,np.array(lowerCamelCase ) ,np.array(lowerCamelCase ) ,return_tensors="""tf""" )
self.assertEqual(masks[0].shape ,(1, 3, 1764, 2646) )
__SCREAMING_SNAKE_CASE = [[1, 0], [0, 1]]
with self.assertRaises(tf.errors.InvalidArgumentError ):
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
lowerCamelCase ,np.array(lowerCamelCase ) ,np.array(lowerCamelCase ) ,return_tensors="""tf""" )
@require_vision
@require_torchvision
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = SamImageProcessor()
__SCREAMING_SNAKE_CASE = SamProcessor(lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).image_processor
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
@is_pt_tf_cross_test
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = np.random.randint(0 ,2 ,size=(1, 3, 5, 5) ).astype(np.floataa )
__SCREAMING_SNAKE_CASE = [tf.convert_to_tensor(lowerCamelCase )]
__SCREAMING_SNAKE_CASE = [torch.tensor(lowerCamelCase )]
__SCREAMING_SNAKE_CASE = [[1764, 2646]]
__SCREAMING_SNAKE_CASE = [[683, 1024]]
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,return_tensors="""tf""" )
__SCREAMING_SNAKE_CASE = processor.post_process_masks(
lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,return_tensors="""pt""" )
self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy() ) )
@is_pt_tf_cross_test
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = SamProcessor(image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""pt""" )["pixel_values"].numpy()
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""pt""" )["pixel_values"].numpy()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""tf""" )["pixel_values"].numpy()
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""tf""" )["pixel_values"].numpy()
self.assertTrue(np.allclose(lowerCamelCase ,lowerCamelCase ) )
self.assertTrue(np.allclose(lowerCamelCase ,lowerCamelCase ) )
self.assertTrue(np.allclose(lowerCamelCase ,lowerCamelCase ) )
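

# Sketch of the contract these tests pin down: post_process_masks upscales
# low-resolution masks back to the original image size (arguments mirror the
# PyTorch calls above; kept as a comment because the imports here are guarded):
#
#     processor = SamProcessor(SamImageProcessor())
#     masks = processor.post_process_masks([torch.ones(1, 3, 5, 5)], [[1764, 2646]], [[683, 1024]])
#     masks[0].shape  # torch.Size([1, 3, 1764, 2646])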
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
    "umberto-commoncrawl-cased-v1": (
        "https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
    ),
    "umberto-wikipedia-uncased-v1": (
        "https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
    ),
}


class CamembertConfig(PretrainedConfig):
    model_type = "camembert"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
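

# A minimal usage sketch for the ONNX export axes above. The task string and
# the OnnxConfig(config, task=...) constructor follow the generic transformers
# OnnxConfig API, so treat them as assumptions rather than part of this file:
#
#     config = CamembertConfig()
#     onnx_config = CamembertOnnxConfig(config, task="sequence-classification")
#     print(onnx_config.inputs)
#     # OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#     #              ("attention_mask", {0: "batch", 1: "sequence"})])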
| 13 | 0 |
import torch

from diffusers import CMStochasticIterativeScheduler

from .test_schedulers import SchedulerCommonTest


class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10

    def get_scheduler_config(self, **kwargs):
        config = {
            "num_train_timesteps": 201,
            "sigma_min": 0.002,
            "sigma_max": 80.0,
        }

        config.update(**kwargs)
        return config

    def test_step_shape(self):
        num_inference_steps = 10

        scheduler_config = self.get_scheduler_config()
        scheduler = self.scheduler_classes[0](**scheduler_config)

        scheduler.set_timesteps(num_inference_steps)

        timestep_0 = scheduler.timesteps[0]
        timestep_1 = scheduler.timesteps[1]

        sample = self.dummy_sample
        residual = 0.1 * sample

        output_0 = scheduler.step(residual, timestep_0, sample).prev_sample
        output_1 = scheduler.step(residual, timestep_1, sample).prev_sample

        self.assertEqual(output_0.shape, sample.shape)
        self.assertEqual(output_0.shape, output_1.shape)

    def test_timesteps(self):
        for timesteps in [10, 50, 100, 1000]:
            self.check_over_configs(num_train_timesteps=timesteps)

    def test_clip_denoised(self):
        for clip_denoised in [True, False]:
            self.check_over_configs(clip_denoised=clip_denoised)

    def test_full_loop_no_noise_onestep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        num_inference_steps = 1
        scheduler.set_timesteps(num_inference_steps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for i, t in enumerate(timesteps):
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 192.7614) < 1e-2
        assert abs(result_mean.item() - 0.2510) < 1e-3

    def test_full_loop_no_noise_multistep(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [106, 0]
        scheduler.set_timesteps(timesteps=timesteps)
        timesteps = scheduler.timesteps

        generator = torch.manual_seed(0)

        model = self.dummy_model()
        sample = self.dummy_sample_deter * scheduler.init_noise_sigma

        for t in timesteps:
            # 1. scale model input
            scaled_sample = scheduler.scale_model_input(sample, t)

            # 2. predict noise residual
            residual = model(scaled_sample, t)

            # 3. predict previous sample x_t-1
            pred_prev_sample = scheduler.step(residual, t, sample, generator=generator).prev_sample

            sample = pred_prev_sample

        result_sum = torch.sum(torch.abs(sample))
        result_mean = torch.mean(torch.abs(sample))

        assert abs(result_sum.item() - 347.6357) < 1e-2
        assert abs(result_mean.item() - 0.4527) < 1e-3

    def test_custom_timesteps_increasing_order(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 15, 0]

        with self.assertRaises(ValueError, msg="`timesteps` must be in descending order."):
            scheduler.set_timesteps(timesteps=timesteps)

    def test_custom_timesteps_passing_both_num_inference_steps_and_timesteps(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [39, 30, 12, 1, 0]
        num_inference_steps = len(timesteps)

        with self.assertRaises(ValueError, msg="Can only pass one of `num_inference_steps` or `timesteps`."):
            scheduler.set_timesteps(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def test_custom_timesteps_too_large(self):
        scheduler_class = self.scheduler_classes[0]
        scheduler_config = self.get_scheduler_config()
        scheduler = scheduler_class(**scheduler_config)

        timesteps = [scheduler.config.num_train_timesteps]

        with self.assertRaises(
            ValueError,
            msg=f"`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}",
        ):
            scheduler.set_timesteps(timesteps=timesteps)
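

# Minimal end-to-end sketch of the scheduler API exercised above (a zero
# tensor stands in for a trained consistency model; every call mirrors the
# test loop; kept as a comment because this module uses relative imports):
#
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=201, sigma_min=0.002, sigma_max=80.0)
#     scheduler.set_timesteps(10)
#     generator = torch.manual_seed(0)
#     sample = torch.randn(1, 3, 32, 32) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         residual = torch.zeros_like(model_input)  # stand-in for a real model's output
#         sample = scheduler.step(residual, t, sample, generator=generator).prev_sample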
| 715 |
'''simple docstring'''
import inspect
import unittest

import numpy as np

from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor


if is_flax_available():
    import jax

    from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel


class FlaxViTModelTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

        return config, pixel_values

    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))

    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
| 13 | 0 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a = logging.get_logger(__name__)
def get_mobilevit_config( mobilevit_name ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = [144, 192, 240]
__SCREAMING_SNAKE_CASE : Optional[int] = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
__SCREAMING_SNAKE_CASE : Any = [96, 120, 144]
__SCREAMING_SNAKE_CASE : Dict = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
__SCREAMING_SNAKE_CASE : List[str] = [64, 80, 96]
__SCREAMING_SNAKE_CASE : Union[str, Any] = [16, 16, 24, 48, 64, 80, 320]
__SCREAMING_SNAKE_CASE : List[Any] = 0.0_5
__SCREAMING_SNAKE_CASE : str = 2.0
if mobilevit_name.startswith("""deeplabv3_""" ):
__SCREAMING_SNAKE_CASE : str = 512
__SCREAMING_SNAKE_CASE : Any = 16
__SCREAMING_SNAKE_CASE : Optional[Any] = 21
__SCREAMING_SNAKE_CASE : Dict = """pascal-voc-id2label.json"""
else:
__SCREAMING_SNAKE_CASE : Dict = 1000
__SCREAMING_SNAKE_CASE : Any = """imagenet-1k-id2label.json"""
__SCREAMING_SNAKE_CASE : str = """huggingface/label-files"""
__SCREAMING_SNAKE_CASE : List[Any] = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="""dataset""" ) , """r""" ) )
__SCREAMING_SNAKE_CASE : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
__SCREAMING_SNAKE_CASE : Union[str, Any] = idalabel
__SCREAMING_SNAKE_CASE : str = {v: k for k, v in idalabel.items()}
return config
def rename_key( name , base_model=False ):
'''simple docstring'''
for i in range(1 , 6 ):
if f"""layer_{i}.""" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace(f"""layer_{i}.""" , f"""encoder.layer.{i - 1}.""" )
if "conv_1." in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace("""conv_1.""" , """conv_stem.""" )
if ".block." in name:
__SCREAMING_SNAKE_CASE : Any = name.replace(""".block.""" , """.""" )
if "exp_1x1" in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""exp_1x1""" , """expand_1x1""" )
if "red_1x1" in name:
__SCREAMING_SNAKE_CASE : Union[str, Any] = name.replace("""red_1x1""" , """reduce_1x1""" )
if ".local_rep.conv_3x3." in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""" )
if ".local_rep.conv_1x1." in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""" )
if ".norm." in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".norm.""" , """.normalization.""" )
if ".conv." in name:
__SCREAMING_SNAKE_CASE : int = name.replace(""".conv.""" , """.convolution.""" )
if ".conv_proj." in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace(""".conv_proj.""" , """.conv_projection.""" )
for i in range(0 , 2 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace(f""".{i}.{j}.""" , f""".{i}.layer.{j}.""" )
for i in range(2 , 6 ):
for j in range(0 , 4 ):
if f""".{i}.{j}.""" in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace(f""".{i}.{j}.""" , f""".{i}.""" )
if "expand_1x1" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""" )
if "conv_3x3" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""" )
if "reduce_1x1" in name:
__SCREAMING_SNAKE_CASE : Any = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""" )
for i in range(2 , 5 ):
if f""".global_rep.{i}.weight""" in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace(f""".global_rep.{i}.weight""" , """.layernorm.weight""" )
if f""".global_rep.{i}.bias""" in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace(f""".global_rep.{i}.bias""" , """.layernorm.bias""" )
if ".global_rep." in name:
__SCREAMING_SNAKE_CASE : str = name.replace(""".global_rep.""" , """.transformer.""" )
if ".pre_norm_mha.0." in name:
__SCREAMING_SNAKE_CASE : Optional[int] = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""" )
if ".pre_norm_mha.1.out_proj." in name:
__SCREAMING_SNAKE_CASE : int = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""" )
if ".pre_norm_ffn.0." in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""" )
if ".pre_norm_ffn.1." in name:
__SCREAMING_SNAKE_CASE : List[str] = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""" )
if ".pre_norm_ffn.4." in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""" )
if ".transformer." in name:
__SCREAMING_SNAKE_CASE : List[Any] = name.replace(""".transformer.""" , """.transformer.layer.""" )
if ".aspp_layer." in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace(""".aspp_layer.""" , """.""" )
if ".aspp_pool." in name:
__SCREAMING_SNAKE_CASE : str = name.replace(""".aspp_pool.""" , """.""" )
if "seg_head." in name:
__SCREAMING_SNAKE_CASE : Tuple = name.replace("""seg_head.""" , """segmentation_head.""" )
if "segmentation_head.classifier.classifier." in name:
__SCREAMING_SNAKE_CASE : Any = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""" )
if "classifier.fc." in name:
__SCREAMING_SNAKE_CASE : Optional[Any] = name.replace("""classifier.fc.""" , """classifier.""" )
elif (not base_model) and ("segmentation_head." not in name):
__SCREAMING_SNAKE_CASE : List[str] = """mobilevit.""" + name
return name
def convert_state_dict( orig_state_dict , model , base_model=False ):
'''simple docstring'''
if base_model:
__SCREAMING_SNAKE_CASE : str = """"""
else:
__SCREAMING_SNAKE_CASE : Dict = """mobilevit."""
for key in orig_state_dict.copy().keys():
__SCREAMING_SNAKE_CASE : Union[str, Any] = orig_state_dict.pop(snake_case__ )
if key[:8] == "encoder.":
__SCREAMING_SNAKE_CASE : Any = key[8:]
if "qkv" in key:
__SCREAMING_SNAKE_CASE : Union[str, Any] = key.split(""".""" )
__SCREAMING_SNAKE_CASE : Any = int(key_split[0][6:] ) - 1
__SCREAMING_SNAKE_CASE : Union[str, Any] = int(key_split[3] )
__SCREAMING_SNAKE_CASE : List[Any] = model.get_submodule(f"""{model_prefix}encoder.layer.{layer_num}""" )
__SCREAMING_SNAKE_CASE : List[str] = layer.transformer.layer[transformer_num].attention.attention.all_head_size
__SCREAMING_SNAKE_CASE : Optional[int] = (
f"""{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention."""
)
if "weight" in key:
__SCREAMING_SNAKE_CASE : str = val[:dim, :]
__SCREAMING_SNAKE_CASE : Tuple = val[dim : dim * 2, :]
__SCREAMING_SNAKE_CASE : Tuple = val[-dim:, :]
else:
__SCREAMING_SNAKE_CASE : Dict = val[:dim]
__SCREAMING_SNAKE_CASE : Any = val[dim : dim * 2]
__SCREAMING_SNAKE_CASE : str = val[-dim:]
else:
__SCREAMING_SNAKE_CASE : str = val
return orig_state_dict
def prepare_img( ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
__SCREAMING_SNAKE_CASE : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def convert_movilevit_checkpoint( mobilevit_name , checkpoint_path , pytorch_dump_folder_path , push_to_hub=False ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Optional[Any] = get_mobilevit_config(snake_case__ )
# load original state_dict
__SCREAMING_SNAKE_CASE : Any = torch.load(snake_case__ , map_location="""cpu""" )
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_""" ):
__SCREAMING_SNAKE_CASE : Optional[Any] = MobileViTForSemanticSegmentation(snake_case__ ).eval()
else:
__SCREAMING_SNAKE_CASE : Any = MobileViTForImageClassification(snake_case__ ).eval()
__SCREAMING_SNAKE_CASE : Union[str, Any] = convert_state_dict(snake_case__ , snake_case__ )
model.load_state_dict(snake_case__ )
# Check outputs on an image, prepared by MobileViTImageProcessor
__SCREAMING_SNAKE_CASE : str = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32 )
__SCREAMING_SNAKE_CASE : Union[str, Any] = image_processor(images=prepare_img() , return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE : Any = model(**snake_case__ )
__SCREAMING_SNAKE_CASE : int = outputs.logits
if mobilevit_name.startswith("""deeplabv3_""" ):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
__SCREAMING_SNAKE_CASE : List[str] = torch.tensor(
[
                [[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
                [[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
                [[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xs":
__SCREAMING_SNAKE_CASE : Tuple = torch.tensor(
[
                [[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
                [[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
                [[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
] )
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
__SCREAMING_SNAKE_CASE : int = torch.tensor(
[
                [[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
                [[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
                [[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3, :3, :3] , snake_case__ , atol=1e-4 )
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
            __SCREAMING_SNAKE_CASE : Any = torch.tensor([-0.9866, 0.2392, -1.1241] )
elif mobilevit_name == "mobilevit_xs":
            __SCREAMING_SNAKE_CASE : Optional[Any] = torch.tensor([-2.4761, -0.9399, -1.9587] )
elif mobilevit_name == "mobilevit_xxs":
            __SCREAMING_SNAKE_CASE : Dict = torch.tensor([-1.9364, -1.2327, -0.4653] )
else:
raise ValueError(f"""Unknown mobilevit_name: {mobilevit_name}""" )
assert torch.allclose(logits[0, :3] , snake_case__ , atol=1e-4 )
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(f"""Saving model {mobilevit_name} to {pytorch_dump_folder_path}""" )
model.save_pretrained(snake_case__ )
print(f"""Saving image processor to {pytorch_dump_folder_path}""" )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
__SCREAMING_SNAKE_CASE : Any = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""" )
__SCREAMING_SNAKE_CASE : Tuple = model_mapping[mobilevit_name]
image_processor.push_to_hub(snake_case__ , organization="""apple""" )
model.push_to_hub(snake_case__ , organization="""apple""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--mobilevit_name",
default="mobilevit_s",
type=str,
help=(
"Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\',"
" \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'."
),
)
parser.add_argument(
"--checkpoint_path", required=True, type=str, help="Path to the original state dict (.pt file)."
)
parser.add_argument(
"--pytorch_dump_folder_path", required=True, type=str, help="Path to the output PyTorch model directory."
)
parser.add_argument(
"--push_to_hub", action="store_true", help="Whether or not to push the converted model to the 🤗 hub."
)
a = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
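

# Hypothetical invocation of this script (the script file name and paths are
# illustrative; the flags match the argparse definitions above):
#
#     python convert_mobilevit_original_to_pytorch.py \
#         --mobilevit_name mobilevit_s \
#         --checkpoint_path ./weights/mobilevit_s.pt \
#         --pytorch_dump_folder_path ./mobilevit-small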
| 716 |
'''simple docstring'''
import functools
import operator

from ...configuration_utils import PretrainedConfig
from ...utils import logging


logger = logging.get_logger(__name__)

SEW_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
    # See all SEW models at https://huggingface.co/models?filter=sew
}


class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(
        self,
        vocab_size=32,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        squeeze_factor=2,
        hidden_act="gelu",
        hidden_dropout=0.1,
        activation_dropout=0.1,
        attention_dropout=0.1,
        feat_proj_dropout=0.0,
        final_dropout=0.1,
        layerdrop=0.1,
        initializer_range=0.02,
        layer_norm_eps=1e-5,
        feat_extract_norm="group",
        feat_extract_activation="gelu",
        conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512),
        conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1),
        conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1),
        conv_bias=False,
        num_conv_pos_embeddings=128,
        num_conv_pos_embedding_groups=16,
        apply_spec_augment=True,
        mask_time_prob=0.05,
        mask_time_length=10,
        mask_time_min_masks=2,
        mask_feature_prob=0.0,
        mask_feature_length=10,
        mask_feature_min_masks=0,
        ctc_loss_reduction="mean",
        ctc_zero_infinity=False,
        use_weighted_layer_sum=False,
        classifier_proj_size=256,
        pad_token_id=0,
        bos_token_id=1,
        eos_token_id=2,
        **kwargs,
    ):
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size

        if (
            (len(self.conv_stride) != self.num_feat_extract_layers)
            or (len(self.conv_kernel) != self.num_feat_extract_layers)
            or (len(self.conv_dim) != self.num_feat_extract_layers)
        ):
            raise ValueError(
                "Configuration for convolutional layers is incorrect. "
                "It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`, "
                f"but is `len(config.conv_dim) = {len(self.conv_dim)}`, `len(config.conv_stride) "
                f"= {len(self.conv_stride)}`, `len(config.conv_kernel) = {len(self.conv_kernel)}`."
            )

        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size

    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
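

# Quick sanity sketch for inputs_to_logits_ratio above: it is the product of
# the feature-extractor strides, i.e. how many raw audio samples collapse into
# one encoder frame. With the default strides that is 5 * 2**6 = 320:
#
#     import functools, operator
#     strides = (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1)
#     assert functools.reduce(operator.mul, strides, 1) == 320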
| 13 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import List, Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging


logger = logging.get_logger(__name__)

EFFICIENTNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/efficientnet-b7": "https://huggingface.co/google/efficientnet-b7/resolve/main/config.json",
}


class EfficientNetConfig(PretrainedConfig):
    model_type = "efficientnet"

    def __init__(
        self,
        num_channels: int = 3,
        image_size: int = 600,
        width_coefficient: float = 2.0,
        depth_coefficient: float = 3.1,
        depth_divisor: int = 8,
        kernel_sizes: List[int] = [3, 3, 5, 3, 5, 5, 3],
        in_channels: List[int] = [32, 16, 24, 40, 80, 112, 192],
        out_channels: List[int] = [16, 24, 40, 80, 112, 192, 320],
        depthwise_padding: List[int] = [],
        strides: List[int] = [1, 2, 2, 2, 1, 2, 1],
        num_block_repeats: List[int] = [1, 2, 2, 3, 3, 4, 1],
        expand_ratios: List[int] = [1, 6, 6, 6, 6, 6, 6],
        squeeze_expansion_ratio: float = 0.25,
        hidden_act: str = "swish",
        hidden_dim: int = 2560,
        pooling_type: str = "mean",
        initializer_range: float = 0.02,
        batch_norm_eps: float = 0.001,
        batch_norm_momentum: float = 0.99,
        dropout_rate: float = 0.5,
        drop_connect_rate: float = 0.2,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.dropout_rate = dropout_rate
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats) * 4


class EfficientNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-5
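

# Sketch of the derived attribute above: num_hidden_layers is four times the
# total number of block repeats, so the defaults give (1+2+2+3+3+4+1) * 4 = 64:
#
#     config = EfficientNetConfig()
#     assert sum(config.num_block_repeats) == 16
#     assert config.num_hidden_layers == 64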
| 717 |
'''simple docstring'''


def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Project Euler 26: find the value of d <= `digit` for which 1/d contains the
    longest recurring cycle in its decimal fraction part. The inner loop tracks
    the remainders of the long division; the list stops growing once a
    remainder repeats.

    >>> solution(1, 10)
    7
    >>> solution(10, 100)
    97
    >>> solution(10, 1000)
    983
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
            now_divide = now_divide * 10 % divide_by_number

    return the_digit


# Tests
if __name__ == "__main__":
    import doctest

    doctest.testmod()
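

# Hand-rolled check of the remainder-cycling idea behind solution(): long
# division of 1/7 revisits its first remainder after six steps, matching the
# period of 0.(142857):
#
#     remainders = []
#     r = 1
#     while r not in remainders:
#         remainders.append(r)
#         r = r * 10 % 7
#     len(remainders)  # 6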
| 13 | 0 |
'''simple docstring'''
from typing import Dict, Iterable, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format, to_pil_image
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_pytesseract_available, is_vision_available, logging, requires_backends
if is_vision_available():
import PIL
# soft dependency
if is_pytesseract_available():
import pytesseract
a = logging.get_logger(__name__)
def normalize_box( box , width , height ):
    '''simple docstring'''
    return [
        int(1000 * (box[0] / width) ),
        int(1000 * (box[1] / height) ),
        int(1000 * (box[2] / width) ),
        int(1000 * (box[3] / height) ),
    ]
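

# Worked example for normalize_box: a box on an 800x600-pixel page maps into
# the 0-1000 coordinate space LayoutLM-style models expect:
#
#     normalize_box([40, 60, 240, 160], width=800, height=600)
#     # -> [50, 100, 300, 266]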
def apply_tesseract( image , lang , tesseract_config ):
    '''simple docstring'''
    # apply OCR
    pil_image = to_pil_image(image)
    image_width, image_height = pil_image.size
    data = pytesseract.image_to_data(pil_image, lang=lang, output_type="dict", config=tesseract_config)
    words, left, top, width, height = data["text"], data["left"], data["top"], data["width"], data["height"]

    # filter empty words and corresponding coordinates
    irrelevant_indices = [idx for idx, word in enumerate(words) if not word.strip()]
    words = [word for idx, word in enumerate(words) if idx not in irrelevant_indices]
    left = [coord for idx, coord in enumerate(left) if idx not in irrelevant_indices]
    top = [coord for idx, coord in enumerate(top) if idx not in irrelevant_indices]
    width = [coord for idx, coord in enumerate(width) if idx not in irrelevant_indices]
    height = [coord for idx, coord in enumerate(height) if idx not in irrelevant_indices]

    # turn coordinates into (left, top, left+width, top+height) format
    actual_boxes = []
    for x, y, w, h in zip(left, top, width, height):
        actual_box = [x, y, x + w, y + h]
        actual_boxes.append(actual_box)

    # finally, normalize the bounding boxes
    normalized_boxes = []
    for box in actual_boxes:
        normalized_boxes.append(normalize_box(box, image_width, image_height))

    assert len(words) == len(normalized_boxes), "Not as many words as there are bounding boxes"

    return words, normalized_boxes
class LayoutLMv3ImageProcessor( BaseImageProcessor ):
    model_input_names = ["pixel_values"]
def __init__( self : Any ,lowerCamelCase : Any = True ,lowerCamelCase : Optional[Any] = None ,lowerCamelCase : int = PILImageResampling.BILINEAR ,lowerCamelCase : List[str] = True ,lowerCamelCase : Dict = 1 / 255 ,lowerCamelCase : List[Any] = True ,lowerCamelCase : str = None ,lowerCamelCase : Optional[Any] = None ,lowerCamelCase : int = True ,lowerCamelCase : int = None ,lowerCamelCase : List[Any] = "" ,**lowerCamelCase : Tuple ,):
'''simple docstring'''
super().__init__(**snake_case__ )
__SCREAMING_SNAKE_CASE = size if size is not None else {"height": 224, "width": 224}
__SCREAMING_SNAKE_CASE = get_size_dict(snake_case__ )
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = resample
__SCREAMING_SNAKE_CASE = do_rescale
__SCREAMING_SNAKE_CASE = rescale_value
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else IMAGENET_STANDARD_STD
__SCREAMING_SNAKE_CASE = apply_ocr
__SCREAMING_SNAKE_CASE = ocr_lang
__SCREAMING_SNAKE_CASE = tesseract_config
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ,lowerCamelCase : Any ,lowerCamelCase : Optional[Any] = PILImageResampling.BILINEAR ,lowerCamelCase : str = None ,**lowerCamelCase : Dict ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_size_dict(snake_case__ )
if "height" not in size or "width" not in size:
raise ValueError(f"""The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}""" )
__SCREAMING_SNAKE_CASE = (size["height"], size["width"])
return resize(snake_case__ ,size=snake_case__ ,resample=snake_case__ ,data_format=snake_case__ ,**snake_case__ )
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[Any] ,lowerCamelCase : Optional[Any] = None ,**lowerCamelCase : Optional[int] ,):
'''simple docstring'''
return rescale(snake_case__ ,scale=snake_case__ ,data_format=snake_case__ ,**snake_case__ )
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Dict ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Dict = None ,**lowerCamelCase : Tuple ,):
'''simple docstring'''
return normalize(snake_case__ ,mean=snake_case__ ,std=snake_case__ ,data_format=snake_case__ ,**snake_case__ )
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Any ,lowerCamelCase : str = None ,lowerCamelCase : Dict = None ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : List[Any] = None ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : int = None ,lowerCamelCase : List[str] = None ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : int = None ,lowerCamelCase : int = None ,lowerCamelCase : str = None ,lowerCamelCase : List[Any] = None ,lowerCamelCase : Optional[Any] = ChannelDimension.FIRST ,**lowerCamelCase : List[str] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = do_resize if do_resize is not None else self.do_resize
__SCREAMING_SNAKE_CASE = size if size is not None else self.size
__SCREAMING_SNAKE_CASE = get_size_dict(snake_case__ )
__SCREAMING_SNAKE_CASE = resample if resample is not None else self.resample
__SCREAMING_SNAKE_CASE = do_rescale if do_rescale is not None else self.do_rescale
__SCREAMING_SNAKE_CASE = rescale_factor if rescale_factor is not None else self.rescale_factor
__SCREAMING_SNAKE_CASE = do_normalize if do_normalize is not None else self.do_normalize
__SCREAMING_SNAKE_CASE = image_mean if image_mean is not None else self.image_mean
__SCREAMING_SNAKE_CASE = image_std if image_std is not None else self.image_std
__SCREAMING_SNAKE_CASE = apply_ocr if apply_ocr is not None else self.apply_ocr
__SCREAMING_SNAKE_CASE = ocr_lang if ocr_lang is not None else self.ocr_lang
__SCREAMING_SNAKE_CASE = tesseract_config if tesseract_config is not None else self.tesseract_config
__SCREAMING_SNAKE_CASE = make_list_of_images(snake_case__ )
if not valid_images(snake_case__ ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""If do_normalize is True, image_mean and image_std must be specified.""" )
# All transformations expect numpy arrays.
__SCREAMING_SNAKE_CASE = [to_numpy_array(snake_case__ ) for image in images]
# Tesseract OCR to get words + normalized bounding boxes
if apply_ocr:
requires_backends(self ,"""pytesseract""" )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for image in images:
__SCREAMING_SNAKE_CASE = apply_tesseract(snake_case__ ,snake_case__ ,snake_case__ )
words_batch.append(snake_case__ )
boxes_batch.append(snake_case__ )
if do_resize:
__SCREAMING_SNAKE_CASE = [self.resize(image=snake_case__ ,size=snake_case__ ,resample=snake_case__ ) for image in images]
if do_rescale:
__SCREAMING_SNAKE_CASE = [self.rescale(image=snake_case__ ,scale=snake_case__ ) for image in images]
if do_normalize:
__SCREAMING_SNAKE_CASE = [self.normalize(image=snake_case__ ,mean=snake_case__ ,std=snake_case__ ) for image in images]
__SCREAMING_SNAKE_CASE = [to_channel_dimension_format(snake_case__ ,snake_case__ ) for image in images]
__SCREAMING_SNAKE_CASE = BatchFeature(data={"""pixel_values""": images} ,tensor_type=snake_case__ )
if apply_ocr:
__SCREAMING_SNAKE_CASE = words_batch
__SCREAMING_SNAKE_CASE = boxes_batch
return data
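

# Minimal usage sketch for the image processor above (the file path is
# illustrative, OCR requires pytesseract, and the class name follows the
# definition in this file):
#
#     from PIL import Image
#     processor = LayoutLMv3ImageProcessor(apply_ocr=True)
#     encoding = processor(Image.open("document.png").convert("RGB"), return_tensors="np")
#     encoding["pixel_values"].shape          # (1, 3, 224, 224) with the default size
#     encoding["words"], encoding["boxes"]    # OCR words + boxes normalized to 0-1000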
| 718 |
'''simple docstring'''
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import ChineseCLIPImageProcessor


class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_normalize=True,
        image_mean=[0.48145466, 0.4578275, 0.40821073],
        image_std=[0.26862954, 0.26130258, 0.27577711],
        do_convert_rgb=True,
    ):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_convert_rgb": self.do_convert_rgb,
        }

    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """This function prepares a list of PIL images, or a list of numpy arrays if one specifies numpify=True,
        or a list of PyTorch tensors if one specifies torchify=True.
        """
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs


@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )


@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))

    def test_batch_feature(self):
        pass

    def test_call_pil_four_channels(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
| 13 | 0 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler( metaclass=DummyObject ):
    _backends = ["torch", "torchsde"]
def __init__( self : Optional[int] ,*lowerCamelCase : Optional[Any] ,**lowerCamelCase : Dict ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def UpperCAmelCase__ ( cls : Union[str, Any] ,*lowerCamelCase : Optional[int] ,**lowerCamelCase : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] ,*lowerCamelCase : Dict ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
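# A minimal sketch of how these placeholder classes behave in practice, assuming
# the usual `requires_backends` helper that raises ImportError for the missing
# packages; the scheduler name below is illustrative, not taken from this file:
#
#     try:
#         scheduler = DPMSolverSDEScheduler()  # needs torch + torchsde
#     except ImportError as err:
#         print(err)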
| 719 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
def wrapper(*__UpperCAmelCase , **__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = timeit.default_timer()
__SCREAMING_SNAKE_CASE = func(*__UpperCAmelCase , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = timeit.default_timer() - starttime
return delta
__SCREAMING_SNAKE_CASE = func.__name__
return wrapper
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = seq_shapes or {}
for i in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__UpperCAmelCase , _ArrayXD ):
__SCREAMING_SNAKE_CASE = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__UpperCAmelCase , datasets.Value ):
if v.dtype == "string":
__SCREAMING_SNAKE_CASE = """The small grey turtle was surprisingly fast when challenged."""
else:
__SCREAMING_SNAKE_CASE = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__UpperCAmelCase , datasets.Sequence ):
while isinstance(__UpperCAmelCase , datasets.Sequence ):
__SCREAMING_SNAKE_CASE = v.feature
__SCREAMING_SNAKE_CASE = seq_shapes[k]
__SCREAMING_SNAKE_CASE = np.random.rand(*__UpperCAmelCase ).astype(v.dtype )
__SCREAMING_SNAKE_CASE = data
dummy_data.append((i, example) )
return dummy_data
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = generate_examples(__UpperCAmelCase , num_examples=__UpperCAmelCase , seq_shapes=__UpperCAmelCase )
with ArrowWriter(features=__UpperCAmelCase , path=__UpperCAmelCase ) as writer:
for key, record in dummy_data:
__SCREAMING_SNAKE_CASE = features.encode_example(__UpperCAmelCase )
writer.write(__UpperCAmelCase )
        num_final_examples , num_bytes = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
__SCREAMING_SNAKE_CASE = datasets.Dataset.from_file(filename=__UpperCAmelCase , info=datasets.DatasetInfo(features=__UpperCAmelCase ) )
return dataset
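# A hedged usage sketch for the helpers above, using the pre-obfuscation name
# `generate_example_dataset` and an assumed Features spec (illustrative only):
#
#     features = datasets.Features({"text": datasets.Value("string")})
#     # dataset = generate_example_dataset("bench.arrow", features, num_examples=100)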
| 13 | 0 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class __a ( _snake_case ):
__UpperCamelCase : Dict = (DDPMScheduler,)
def UpperCAmelCase__ ( self : int ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {
"num_train_timesteps": 1000,
"beta_start": 0.0_001,
"beta_end": 0.02,
"beta_schedule": "linear",
"variance_type": "fixed_small",
"clip_sample": True,
}
config.update(**snake_case_ )
return config
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=snake_case_ )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
for beta_start, beta_end in zip([0.0_001, 0.001, 0.01, 0.1] ,[0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=snake_case_ ,beta_end=snake_case_ )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=snake_case_ )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=snake_case_ )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=snake_case_ )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
self.check_over_configs(thresholding=snake_case_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=snake_case_ ,prediction_type=snake_case_ ,sample_max_value=snake_case_ ,)
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=snake_case_ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for t in [0, 500, 999]:
self.check_over_forward(time_step=snake_case_ )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**snake_case_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.00_979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**snake_case_ )
__SCREAMING_SNAKE_CASE = len(snake_case_ )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(snake_case_ ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(snake_case_ ,snake_case_ )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(snake_case_ ,snake_case_ ,snake_case_ ,generator=snake_case_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(snake_case_ ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 258.9_606 ) < 1E-2
assert abs(result_mean.item() - 0.3_372 ) < 1E-3
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = scheduler_class(**snake_case_ )
__SCREAMING_SNAKE_CASE = len(snake_case_ )
__SCREAMING_SNAKE_CASE = self.dummy_model()
__SCREAMING_SNAKE_CASE = self.dummy_sample_deter
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
for t in reversed(range(snake_case_ ) ):
# 1. predict noise residual
__SCREAMING_SNAKE_CASE = model(snake_case_ ,snake_case_ )
# 2. predict previous mean of sample x_t-1
__SCREAMING_SNAKE_CASE = scheduler.step(snake_case_ ,snake_case_ ,snake_case_ ,generator=snake_case_ ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__SCREAMING_SNAKE_CASE = pred_prev_sample
__SCREAMING_SNAKE_CASE = torch.sum(torch.abs(snake_case_ ) )
__SCREAMING_SNAKE_CASE = torch.mean(torch.abs(snake_case_ ) )
assert abs(result_sum.item() - 202.0_296 ) < 1E-2
assert abs(result_mean.item() - 0.2_631 ) < 1E-3
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**snake_case_ )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=snake_case_ )
__SCREAMING_SNAKE_CASE = scheduler.timesteps
for i, timestep in enumerate(snake_case_ ):
if i == len(snake_case_ ) - 1:
__SCREAMING_SNAKE_CASE = -1
else:
__SCREAMING_SNAKE_CASE = timesteps[i + 1]
__SCREAMING_SNAKE_CASE = scheduler.previous_timestep(snake_case_ )
__SCREAMING_SNAKE_CASE = prev_t.item()
self.assertEqual(snake_case_ ,snake_case_ )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**snake_case_ )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 51, 0]
with self.assertRaises(snake_case_ ,msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=snake_case_ )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**snake_case_ )
__SCREAMING_SNAKE_CASE = [100, 87, 50, 1, 0]
__SCREAMING_SNAKE_CASE = len(snake_case_ )
with self.assertRaises(snake_case_ ,msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=snake_case_ ,timesteps=snake_case_ )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.scheduler_classes[0]
__SCREAMING_SNAKE_CASE = self.get_scheduler_config()
__SCREAMING_SNAKE_CASE = scheduler_class(**snake_case_ )
__SCREAMING_SNAKE_CASE = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            snake_case_ ,msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" ,):
scheduler.set_timesteps(timesteps=snake_case_ )
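# The last three tests above exercise `set_timesteps` validation; a hedged
# sketch of the expected behaviour with readable names (illustrative only):
#
#     scheduler.set_timesteps(timesteps=[100, 87, 50, 1, 0])   # ok: strictly descending
#     scheduler.set_timesteps(timesteps=[100, 87, 50, 51, 0])  # raises: not descending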
| 720 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a = "__DUMMY_TRANSFORMERS_USER__"
a = "Dummy User"
a = "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"
a = "https://hub-ci.huggingface.co"
a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
a = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(__UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=__UpperCAmelCase )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfFolder.get_token()
HfFolder.save_token(__UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
def _cleanup_repo(__UpperCAmelCase ):
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
@contextmanager
def _temporary_repo(__UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(__UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
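# A hedged sketch of how a test consumes one of these session fixtures, using
# the pre-obfuscation fixture names (the load signature is an assumption):
#
#     def test_load_private_txt(hf_private_dataset_repo_txt_data, hf_token):
#         ds = datasets.load_dataset(hf_private_dataset_repo_txt_data, use_auth_token=hf_token)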
| 13 | 0 |
'''simple docstring'''
from typing import List, Optional, Tuple, Union
import torch
from ...schedulers import DDIMScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class __a ( lowercase__ ):
def __init__( self : Optional[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : int ):
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
__SCREAMING_SNAKE_CASE = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=UpperCAmelCase__ ,scheduler=UpperCAmelCase__ )
@torch.no_grad()
def __call__( self : List[Any] ,lowerCamelCase : int = 1 ,lowerCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,lowerCamelCase : float = 0.0 ,lowerCamelCase : int = 50 ,lowerCamelCase : Optional[bool] = None ,lowerCamelCase : Optional[str] = "pil" ,lowerCamelCase : bool = True ,):
'''simple docstring'''
if isinstance(self.unet.config.sample_size ,UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = (
batch_size,
self.unet.config.in_channels,
self.unet.config.sample_size,
self.unet.config.sample_size,
)
else:
__SCREAMING_SNAKE_CASE = (batch_size, self.unet.config.in_channels, *self.unet.config.sample_size)
if isinstance(UpperCAmelCase__ ,UpperCAmelCase__ ) and len(UpperCAmelCase__ ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(UpperCAmelCase__ )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
__SCREAMING_SNAKE_CASE = randn_tensor(UpperCAmelCase__ ,generator=UpperCAmelCase__ ,device=self.device ,dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(UpperCAmelCase__ )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
__SCREAMING_SNAKE_CASE = self.unet(UpperCAmelCase__ ,UpperCAmelCase__ ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
__SCREAMING_SNAKE_CASE = self.scheduler.step(
UpperCAmelCase__ ,UpperCAmelCase__ ,UpperCAmelCase__ ,eta=UpperCAmelCase__ ,use_clipped_model_output=UpperCAmelCase__ ,generator=UpperCAmelCase__ ).prev_sample
__SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 ,1 )
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
__SCREAMING_SNAKE_CASE = self.numpy_to_pil(UpperCAmelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=UpperCAmelCase__ )
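# A minimal usage sketch for this DDIM-style pipeline; the checkpoint id below
# is illustrative and the call is kept commented out:
#
#     pipe = DDIMPipeline.from_pretrained("google/ddpm-cifar10-32")
#     image = pipe(batch_size=1, num_inference_steps=50, eta=0.0).images[0]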
| 721 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Dict = 'deta'
__UpperCamelCase : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase )
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
# deformable attributes
__SCREAMING_SNAKE_CASE = num_feature_levels
__SCREAMING_SNAKE_CASE = encoder_n_points
__SCREAMING_SNAKE_CASE = decoder_n_points
__SCREAMING_SNAKE_CASE = two_stage
__SCREAMING_SNAKE_CASE = two_stage_num_proposals
__SCREAMING_SNAKE_CASE = with_box_refine
__SCREAMING_SNAKE_CASE = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
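# A hedged construction sketch; `DetaConfig` is the public name this obfuscated
# class corresponds to, and the keyword choices below are illustrative:
#
#     config = DetaConfig(two_stage=True, with_box_refine=True, num_queries=900)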
| 13 | 0 |
a = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/"
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE = f"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(_lowerCAmelCase )
__SCREAMING_SNAKE_CASE = """""".join(bin(_lowerCAmelCase )[2:].zfill(8 ) for byte in data )
__SCREAMING_SNAKE_CASE = len(_lowerCAmelCase ) % 6 != 0
if padding_needed:
# The padding that will be added later
__SCREAMING_SNAKE_CASE = b"""=""" * ((6 - len(_lowerCAmelCase ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_lowerCAmelCase ) % 6)
else:
__SCREAMING_SNAKE_CASE = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_lowerCAmelCase ) , 6 ) ).encode()
+ padding
)
def __magic_name__ ( __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
if not isinstance(_lowerCAmelCase , _lowerCAmelCase ) and not isinstance(_lowerCAmelCase , _lowerCAmelCase ):
__SCREAMING_SNAKE_CASE = (
"""argument should be a bytes-like object or ASCII string, """
f"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(_lowerCAmelCase )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_lowerCAmelCase , _lowerCAmelCase ):
try:
__SCREAMING_SNAKE_CASE = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
__SCREAMING_SNAKE_CASE = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_lowerCAmelCase ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
__SCREAMING_SNAKE_CASE = encoded_data[:-padding]
__SCREAMING_SNAKE_CASE = """""".join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
__SCREAMING_SNAKE_CASE = """""".join(
bin(B64_CHARSET.index(_lowerCAmelCase ) )[2:].zfill(6 ) for char in encoded_data )
__SCREAMING_SNAKE_CASE = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_lowerCAmelCase ) , 8 )
]
return bytes(_lowerCAmelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
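# A self-contained round-trip sketch of the same scheme via the stdlib (the two
# helpers above both carry the obfuscated name `__magic_name__`, so the second
# definition shadows the first and they cannot be composed directly):
#
#     import base64
#     assert base64.b64encode(b"Hello") == b"SGVsbG8="
#     assert base64.b64decode(b"SGVsbG8=") == b"Hello"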
| 700 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
        batch , height , width , channels = hidden_states.shape
__SCREAMING_SNAKE_CASE = jax.image.resize(
lowerCamelCase ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,)
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : int = None
__UpperCamelCase : float = 0.0
__UpperCamelCase : bool = None
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__SCREAMING_SNAKE_CASE = nn.Dense(lowerCamelCase ,dtype=self.dtype )
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__SCREAMING_SNAKE_CASE = None
if use_nin_shortcut:
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = hidden_states
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(lowerCamelCase ,1 ) ,1 )
__SCREAMING_SNAKE_CASE = hidden_states + temb
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.dropout(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
if self.conv_shortcut is not None:
__SCREAMING_SNAKE_CASE = self.conv_shortcut(lowerCamelCase )
return hidden_states + residual
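# A hedged initialisation sketch for the upsample block above, assuming the
# pre-obfuscation name `FlaxUpsample2D` and NHWC inputs (illustrative only):
#
#     import jax, jax.numpy as jnp
#     # block = FlaxUpsample2D(out_channels=64)
#     # params = block.init(jax.random.PRNGKey(0), jnp.ones((1, 8, 8, 64)))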
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import RoFormerConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForMultipleChoice,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerModel,
)
from transformers.models.roformer.modeling_tf_roformer import (
TFRoFormerSelfAttention,
TFRoFormerSinusoidalPositionalEmbedding,
)
class __a:
def __init__( self : Union[str, Any] ,lowerCamelCase : Dict ,lowerCamelCase : str=13 ,lowerCamelCase : Optional[Any]=7 ,lowerCamelCase : Tuple=True ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : Optional[int]=99 ,lowerCamelCase : Union[str, Any]=32 ,lowerCamelCase : str=2 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : str="gelu" ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : str=0.1 ,lowerCamelCase : Optional[int]=512 ,lowerCamelCase : List[Any]=16 ,lowerCamelCase : Optional[int]=2 ,lowerCamelCase : Dict=0.02 ,lowerCamelCase : int=3 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : int=None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = 13
__SCREAMING_SNAKE_CASE = 7
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = 99
__SCREAMING_SNAKE_CASE = 32
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = 37
__SCREAMING_SNAKE_CASE = '''gelu'''
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = 0.1
__SCREAMING_SNAKE_CASE = 512
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = 2
__SCREAMING_SNAKE_CASE = 0.02
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = 4
__SCREAMING_SNAKE_CASE = None
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
__SCREAMING_SNAKE_CASE = None
if self.use_input_mask:
__SCREAMING_SNAKE_CASE = random_attention_mask([self.batch_size, self.seq_length] )
__SCREAMING_SNAKE_CASE = None
if self.use_token_type_ids:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.num_choices )
__SCREAMING_SNAKE_CASE = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,return_dict=__lowerCamelCase ,)
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Dict ,lowerCamelCase : List[str] ,lowerCamelCase : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFRoFormerModel(config=__lowerCamelCase )
__SCREAMING_SNAKE_CASE = {'''input_ids''': input_ids, '''attention_mask''': input_mask, '''token_type_ids''': token_type_ids}
__SCREAMING_SNAKE_CASE = [input_ids, input_mask]
__SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[str] ,lowerCamelCase : Dict ,lowerCamelCase : str ,lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = TFRoFormerForCausalLM(config=__lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__lowerCamelCase )['''logits''']
self.parent.assertListEqual(
list(prediction_scores.numpy().shape ) ,[self.batch_size, self.seq_length, self.vocab_size] )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ,lowerCamelCase : Dict ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM(config=__lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Dict ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFRoFormerForSequenceClassification(config=__lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : List[str] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Any ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : str ,lowerCamelCase : List[str] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_choices
__SCREAMING_SNAKE_CASE = TFRoFormerForMultipleChoice(config=__lowerCamelCase )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = tf.tile(tf.expand_dims(__lowerCamelCase ,1 ) ,(1, self.num_choices, 1) )
__SCREAMING_SNAKE_CASE = {
'''input_ids''': multiple_choice_inputs_ids,
'''attention_mask''': multiple_choice_input_mask,
'''token_type_ids''': multiple_choice_token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : Dict ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Dict ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ,lowerCamelCase : Any ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.num_labels
__SCREAMING_SNAKE_CASE = TFRoFormerForTokenClassification(config=__lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[Any] ,lowerCamelCase : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFRoFormerForQuestionAnswering(config=__lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
'''input_ids''': input_ids,
'''attention_mask''': input_mask,
'''token_type_ids''': token_type_ids,
}
__SCREAMING_SNAKE_CASE = model(__lowerCamelCase )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
__SCREAMING_SNAKE_CASE = {'''input_ids''': input_ids, '''token_type_ids''': token_type_ids, '''attention_mask''': input_mask}
return config, inputs_dict
@require_tf
class __a( lowerCamelCase__, lowerCamelCase__, unittest.TestCase ):
__UpperCamelCase : List[str] = (
(
TFRoFormerModel,
TFRoFormerForCausalLM,
TFRoFormerForMaskedLM,
TFRoFormerForQuestionAnswering,
TFRoFormerForSequenceClassification,
TFRoFormerForTokenClassification,
TFRoFormerForMultipleChoice,
)
if is_tf_available()
else ()
)
__UpperCamelCase : Optional[Any] = (
{
'feature-extraction': TFRoFormerModel,
'fill-mask': TFRoFormerForMaskedLM,
'question-answering': TFRoFormerForQuestionAnswering,
'text-classification': TFRoFormerForSequenceClassification,
'text-generation': TFRoFormerForCausalLM,
'token-classification': TFRoFormerForTokenClassification,
'zero-shot': TFRoFormerForSequenceClassification,
}
if is_tf_available()
else {}
)
__UpperCamelCase : str = False
__UpperCamelCase : Union[str, Any] = False
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : str ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[str] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
if pipeline_test_casse_name == "TextGenerationPipelineTests":
return True
return False
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFRoFormerModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=__lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head(*__lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__lowerCamelCase )
@slow
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFRoFormerModel.from_pretrained("""junnyu/roformer_chinese_base""" )
self.assertIsNotNone(__lowerCamelCase )
@require_tf
class __a( unittest.TestCase ):
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
__SCREAMING_SNAKE_CASE = tf.constant([[0, 1, 2, 3, 4, 5]] )
__SCREAMING_SNAKE_CASE = model(__lowerCamelCase )[0]
# TODO Replace vocab size
__SCREAMING_SNAKE_CASE = 5_0000
__SCREAMING_SNAKE_CASE = [1, 6, vocab_size]
self.assertEqual(output.shape ,__lowerCamelCase )
print(output[:, :3, :3] )
# TODO Replace values below with what was printed above.
__SCREAMING_SNAKE_CASE = tf.constant(
[
[
[-0.12_053_341, -1.0_264_901, 0.29_221_946],
[-1.5_133_783, 0.197_433, 0.15_190_607],
[-5.0_135_403, -3.900_256, -0.84_038_764],
]
] )
tf.debugging.assert_near(output[:, :3, :3] ,__lowerCamelCase ,atol=1E-4 )
@require_tf
class __a( unittest.TestCase ):
__UpperCamelCase : Any = 1E-4
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tf.constant([[4, 10]] )
__SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=6 ,embedding_dim=6 )
__SCREAMING_SNAKE_CASE = emba(input_ids.shape )
__SCREAMING_SNAKE_CASE = tf.constant(
[[0.0_000, 0.0_000, 0.0_000, 1.0_000, 1.0_000, 1.0_000], [0.8_415, 0.0_464, 0.0_022, 0.5_403, 0.9_989, 1.0_000]] )
tf.debugging.assert_near(__lowerCamelCase ,__lowerCamelCase ,atol=self.tolerance )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0_000, 0.0_000, 0.0_000, 0.0_000, 0.0_000],
[0.8_415, 0.8_219, 0.8_020, 0.7_819, 0.7_617],
[0.9_093, 0.9_364, 0.9_581, 0.9_749, 0.9_870],
] )
__SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=512 ,embedding_dim=512 )
emba([2, 16, 512] )
__SCREAMING_SNAKE_CASE = emba.weight[:3, :5]
tf.debugging.assert_near(__lowerCamelCase ,__lowerCamelCase ,atol=self.tolerance )
@require_tf
class __a( unittest.TestCase ):
__UpperCamelCase : Any = 1E-4
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tf.reshape(tf.range(2 * 12 * 16 * 64 ,dtype=tf.floataa ) ,shape=(2, 12, 16, 64) ) / 100
__SCREAMING_SNAKE_CASE = -tf.reshape(tf.range(2 * 12 * 16 * 64 ,dtype=tf.floataa ) ,shape=(2, 12, 16, 64) ) / 100
__SCREAMING_SNAKE_CASE = TFRoFormerSinusoidalPositionalEmbedding(num_positions=32 ,embedding_dim=64 )
__SCREAMING_SNAKE_CASE = embed_positions([2, 16, 768] )[None, None, :, :]
__SCREAMING_SNAKE_CASE = TFRoFormerSelfAttention.apply_rotary_position_embeddings(
__lowerCamelCase ,__lowerCamelCase ,__lowerCamelCase )
__SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0_000, 0.0_100, 0.0_200, 0.0_300, 0.0_400, 0.0_500, 0.0_600, 0.0_700],
[-0.2_012, 0.8_897, 0.0_263, 0.9_401, 0.2_074, 0.9_463, 0.3_481, 0.9_343],
[-1.7_057, 0.6_271, -1.2_145, 1.3_897, -0.6_303, 1.7_647, -0.1_173, 1.8_985],
[-2.1_731, -1.6_397, -2.7_358, 0.2_854, -2.1_840, 1.7_183, -1.3_018, 2.4_871],
[0.2_717, -3.6_173, -2.9_206, -2.1_988, -3.6_638, 0.3_858, -2.9_155, 2.2_980],
[3.9_859, -2.1_580, -0.7_984, -4.4_904, -4.1_181, -2.0_252, -4.4_782, 1.1_253],
] )
__SCREAMING_SNAKE_CASE = tf.constant(
[
[0.0_000, -0.0_100, -0.0_200, -0.0_300, -0.0_400, -0.0_500, -0.0_600, -0.0_700],
[0.2_012, -0.8_897, -0.0_263, -0.9_401, -0.2_074, -0.9_463, -0.3_481, -0.9_343],
[1.7_057, -0.6_271, 1.2_145, -1.3_897, 0.6_303, -1.7_647, 0.1_173, -1.8_985],
[2.1_731, 1.6_397, 2.7_358, -0.2_854, 2.1_840, -1.7_183, 1.3_018, -2.4_871],
[-0.2_717, 3.6_173, 2.9_206, 2.1_988, 3.6_638, -0.3_858, 2.9_155, -2.2_980],
[-3.9_859, 2.1_580, 0.7_984, 4.4_904, 4.1_181, 2.0_252, 4.4_782, -1.1_253],
] )
tf.debugging.assert_near(query_layer[0, 0, :6, :8] ,__lowerCamelCase ,atol=self.tolerance )
tf.debugging.assert_near(key_layer[0, 0, :6, :8] ,__lowerCamelCase ,atol=self.tolerance )
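# For reference, the rotary check above compares against the standard rotation
# applied per position m and frequency theta_i:
#     q' = q * cos(m * theta_i) + rotate_half(q) * sin(m * theta_i)
# with the same transform applied to the key layer.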
| 701 |
'''simple docstring'''
import sys
from collections import defaultdict
class __a :
def __init__( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.node_position[vertex]
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pos
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ):
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
__SCREAMING_SNAKE_CASE = 2 * start + 2
if heap[smallest_child] < heap[start]:
                temp , tempa = heap[smallest_child], positions[smallest_child]
                heap[smallest_child] , positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start] , positions[start] = temp, tempa
__SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,lowerCamelCase )
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = position[index]
while index != 0:
__SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__SCREAMING_SNAKE_CASE = heap[parent]
__SCREAMING_SNAKE_CASE = position[parent]
self.set_position(position[parent] ,lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,lowerCamelCase )
break
__SCREAMING_SNAKE_CASE = parent
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,0 )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1
for i in range(lowerCamelCase ,-1 ,-1 ):
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = positions[0]
__SCREAMING_SNAKE_CASE = sys.maxsize
self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase )
return temp
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Heap()
__SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex
__SCREAMING_SNAKE_CASE = []
for vertex in range(len(__UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(__UpperCAmelCase )
heap.node_position.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = distance
heap.heapify(__UpperCAmelCase , __UpperCAmelCase )
for _ in range(1 , len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__SCREAMING_SNAKE_CASE = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__UpperCAmelCase )]
):
__SCREAMING_SNAKE_CASE = distance
heap.bottom_to_top(
__UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
a = int(input("Enter number of edges: ").strip())
a = defaultdict(list)
for _ in range(edges_number):
a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
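# A hedged example session for the driver above (edge format: `u v weight`);
# the output edges are what Prim's algorithm yields for this small graph:
#
#     Enter number of edges: 3
#     0 1 1
#     1 2 2
#     0 2 4
#     [(0, 1), (1, 2)]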
| 13 | 0 |
'''simple docstring'''
import random
class __a :
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [ord(_lowercase ) for i in text]
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = []
for i in plain:
__SCREAMING_SNAKE_CASE = random.randint(1 ,300 )
__SCREAMING_SNAKE_CASE = (i + k) * k
cipher.append(_lowercase )
key.append(_lowercase )
return cipher, key
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase : list[int] ,lowerCamelCase : list[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for i in range(len(_lowercase ) ):
__SCREAMING_SNAKE_CASE = int((cipher[i] - (key[i]) ** 2) / key[i] )
plain.append(chr(_lowercase ) )
return "".join(_lowercase )
if __name__ == "__main__":
    c , k = Onepad().encrypt("Hello")
print(c, k)
print(Onepad().decrypt(c, k))
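# Per character p with a fresh random key k, the cipher above computes
#     c = (p + k) * k        and inverts it with        p = (c - k**2) / k
# so `decrypt(encrypt(text))` round-trips to the original text.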
| 702 |
'''simple docstring'''
import os
import string
import sys
a = 1 << 8
a = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a = KEYMAP["up"]
a = KEYMAP["left"]
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
a = ord(str(i))
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__UpperCAmelCase ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__UpperCAmelCase )
if ord(__UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] )
except KeyError:
__SCREAMING_SNAKE_CASE = cha[1]
else:
__SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase )
try:
tty.setraw(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase )
return ch
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__UpperCAmelCase ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
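# A hedged usage sketch: the second helper (named `get_character` before
# obfuscation) normalises platform keystrokes, so a menu loop can branch on the
# translated value (illustrative only):
#
#     # char = get_character()
#     # if ord(char) == KEYMAP["up"]:
#     #     move_cursor_up()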
| 13 | 0 |
'''simple docstring'''
import argparse
import torch
from transformers import (
SpeechTaConfig,
SpeechTaFeatureExtractor,
SpeechTaForSpeechToSpeech,
SpeechTaForSpeechToText,
SpeechTaForTextToSpeech,
SpeechTaProcessor,
SpeechTaTokenizer,
logging,
)
from transformers.tokenization_utils import AddedToken
logging.set_verbosity_info()
a = logging.get_logger("transformers.models.speecht5")
a = {
"speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
"speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
"speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
"speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
a = {
"text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
"text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
a = {
"speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
"speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
"speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
"speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
"speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
a = {
"speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
"speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
"speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
"speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
"speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
"speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
"speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
"speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
"speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
"speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
"speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
"speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
a = {
"text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
a = {
"text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
a = {
"encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
"encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
"encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
"encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
"encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
"encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
"encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
"encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
"encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
a = {
"decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
"decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
"decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
"decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
"decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
"decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
"decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
"decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
"decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
"decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
"decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
"decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
"decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
a = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_TEXT_DECODER_PRENET,
**MAPPING_TEXT_DECODER_POSTNET,
}
a = {
**MAPPING_TEXT_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a = {
**MAPPING_SPEECH_ENCODER_PRENET,
**MAPPING_ENCODER,
**MAPPING_DECODER,
**MAPPING_SPEECH_DECODER_PRENET,
**MAPPING_SPEECH_DECODER_POSTNET,
}
a = []
a = [
"encoder.version",
"encoder.layers.*.norm_k.weight",
"encoder.layers.*.norm_k.bias",
"decoder.version",
"decoder.layers.*.norm_k.weight",
"decoder.layers.*.norm_k.bias",
"decoder.pos_emb.pe_k",
"speech_encoder_prenet.embed_positions._float_tensor",
"text_decoder_prenet.embed_positions._float_tensor",
]
a = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"speech_decoder_prenet.*",
"speech_decoder_postnet.*",
]
a = IGNORE_KEYS + [
"encoder.proj",
"speech_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
a = IGNORE_KEYS + [
"encoder.proj",
"text_encoder_prenet.*",
"text_decoder_prenet.*",
"text_decoder_postnet.*",
]
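# Walk the dotted `key` path on the HF model, verify that the checkpoint tensor has
# the expected shape, then assign it to the matching weight/bias/statistic slot.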
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
for attribute in key.split(""".""" ):
__SCREAMING_SNAKE_CASE = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if weight_type is not None:
__SCREAMING_SNAKE_CASE = getattr(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).shape
else:
__SCREAMING_SNAKE_CASE = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "running_mean":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "running_var":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "num_batches_tracked":
__SCREAMING_SNAKE_CASE = value
else:
__SCREAMING_SNAKE_CASE = value
logger.info(f"""{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int:
'''simple docstring'''
for key in ignore_keys:
if key.endswith(""".*""" ):
if name.startswith(key[:-1] ):
return True
elif ".*." in key:
__SCREAMING_SNAKE_CASE = key.split(""".*.""" )
if prefix in name and suffix in name:
return True
elif key in name:
return True
return False
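# Copy every tensor from the fairseq state dict into the HF model, using the
# task-specific mapping and ignore-key tables defined above.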
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
if task == "s2t":
__SCREAMING_SNAKE_CASE = hf_model.speechta.encoder.prenet.feature_encoder
__SCREAMING_SNAKE_CASE = MAPPING_S2T
__SCREAMING_SNAKE_CASE = IGNORE_KEYS_S2T
elif task == "t2s":
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = MAPPING_T2S
__SCREAMING_SNAKE_CASE = IGNORE_KEYS_T2S
elif task == "s2s":
__SCREAMING_SNAKE_CASE = hf_model.speechta.encoder.prenet.feature_encoder
__SCREAMING_SNAKE_CASE = MAPPING_S2S
__SCREAMING_SNAKE_CASE = IGNORE_KEYS_S2S
else:
raise ValueError(f"""Unsupported task: {task}""" )
for name, value in fairseq_dict.items():
if should_ignore(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
logger.info(f"""{name} was ignored""" )
continue
__SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , hf_model.config.feat_extract_norm == """group""" , )
__SCREAMING_SNAKE_CASE = True
else:
for key, mapped_key in MAPPING.items():
# mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if "*" in key:
__SCREAMING_SNAKE_CASE = key.split(""".*.""" )
if prefix in name and suffix in name:
__SCREAMING_SNAKE_CASE = suffix
# if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
if key in name:
__SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
__SCREAMING_SNAKE_CASE = name.split(__SCREAMING_SNAKE_CASE )[0].split(""".""" )[-2]
__SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , __SCREAMING_SNAKE_CASE )
if "weight_g" in name:
__SCREAMING_SNAKE_CASE = "weight_g"
elif "weight_v" in name:
__SCREAMING_SNAKE_CASE = "weight_v"
elif "bias" in name:
__SCREAMING_SNAKE_CASE = "bias"
elif "weight" in name:
__SCREAMING_SNAKE_CASE = "weight"
elif "running_mean" in name:
__SCREAMING_SNAKE_CASE = "running_mean"
elif "running_var" in name:
__SCREAMING_SNAKE_CASE = "running_var"
elif "num_batches_tracked" in name:
__SCREAMING_SNAKE_CASE = "num_batches_tracked"
else:
__SCREAMING_SNAKE_CASE = None
set_recursively(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
continue
if not is_used:
unused_weights.append(__SCREAMING_SNAKE_CASE )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1]
__SCREAMING_SNAKE_CASE = name.split(""".""" )
__SCREAMING_SNAKE_CASE = int(items[0] )
__SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__SCREAMING_SNAKE_CASE )
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=None , ) -> Union[str, Any]:
'''simple docstring'''
if config_path is not None:
__SCREAMING_SNAKE_CASE = SpeechTaConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
__SCREAMING_SNAKE_CASE = SpeechTaConfig()
if task == "s2t":
__SCREAMING_SNAKE_CASE = config.max_text_positions
__SCREAMING_SNAKE_CASE = SpeechTaForSpeechToText(__SCREAMING_SNAKE_CASE )
elif task == "t2s":
__SCREAMING_SNAKE_CASE = 1876
__SCREAMING_SNAKE_CASE = 600
__SCREAMING_SNAKE_CASE = config.max_speech_positions
__SCREAMING_SNAKE_CASE = SpeechTaForTextToSpeech(__SCREAMING_SNAKE_CASE )
elif task == "s2s":
__SCREAMING_SNAKE_CASE = 1876
__SCREAMING_SNAKE_CASE = config.max_speech_positions
__SCREAMING_SNAKE_CASE = SpeechTaForSpeechToSpeech(__SCREAMING_SNAKE_CASE )
else:
raise ValueError(f"""Unknown task name: {task}""" )
if vocab_path:
__SCREAMING_SNAKE_CASE = SpeechTaTokenizer(__SCREAMING_SNAKE_CASE , model_max_length=config.max_text_positions )
# Mask token behaves like a normal word, i.e. include the space before it
__SCREAMING_SNAKE_CASE = AddedToken("""<mask>""" , lstrip=__SCREAMING_SNAKE_CASE , rstrip=__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = mask_token
tokenizer.add_special_tokens({"""mask_token""": mask_token} )
tokenizer.add_tokens(["""<ctc_blank>"""] )
__SCREAMING_SNAKE_CASE = SpeechTaFeatureExtractor()
__SCREAMING_SNAKE_CASE = SpeechTaProcessor(tokenizer=__SCREAMING_SNAKE_CASE , feature_extractor=__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
__SCREAMING_SNAKE_CASE = torch.load(__SCREAMING_SNAKE_CASE )
recursively_load_weights(fairseq_checkpoint["""model"""] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
if repo_id:
print("""Pushing to the hub...""" )
processor.push_to_hub(__SCREAMING_SNAKE_CASE )
model.push_to_hub(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument(
"--task",
default="s2t",
type=str,
help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
)
parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
)
parser.add_argument(
"--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
)
a = parser.parse_args()
convert_speechta_checkpoint(
args.task,
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.vocab_path,
args.push_to_hub,
)
| 703 |
'''simple docstring'''
from __future__ import annotations
import bisect
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
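# Like bisect_left above, but returns the insertion point *after* any entries equal to item.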
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
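# Iterative binary search; returns the index of `item` in `sorted_collection`, or None.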
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1
while left <= right:
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
__SCREAMING_SNAKE_CASE = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__SCREAMING_SNAKE_CASE = midpoint - 1
else:
__SCREAMING_SNAKE_CASE = midpoint + 1
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if index != len(__UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
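# Recursive variant; callers pass the initial bounds explicitly,
# e.g. binary_search_by_recursion(collection, item, 0, len(collection) - 1).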
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
if right < left:
return None
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase )
if __name__ == "__main__":
a = input("Enter numbers separated by comma:\n").strip()
a = sorted(int(item) for item in user_input.split(","))
a = int(input("Enter a single number to be found in the list:\n"))
a = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 13 | 0 |
'''simple docstring'''
import inspect
import unittest
from typing import List
import numpy as np
from transformers import EfficientFormerConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
)
from transformers.models.efficientformer.modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
)
if is_vision_available():
from PIL import Image
from transformers import EfficientFormerImageProcessor
class __a :
def __init__( self : Union[str, Any] ,lowerCamelCase : str ,lowerCamelCase : int = 13 ,lowerCamelCase : int = 64 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 3 ,lowerCamelCase : int = 3 ,lowerCamelCase : bool = True ,lowerCamelCase : bool = True ,lowerCamelCase : int = 128 ,lowerCamelCase : int=[16, 32, 64, 128] ,lowerCamelCase : int = 7 ,lowerCamelCase : int = 4 ,lowerCamelCase : int = 37 ,lowerCamelCase : str = "gelu" ,lowerCamelCase : float = 0.1 ,lowerCamelCase : float = 0.1 ,lowerCamelCase : int = 10 ,lowerCamelCase : float = 0.02 ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 1 ,lowerCamelCase : int = 128 ,lowerCamelCase : List[int] = [2, 2, 2, 2] ,lowerCamelCase : int = 2 ,lowerCamelCase : int = 2 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = encoder_stride
__SCREAMING_SNAKE_CASE = num_attention_outputs
__SCREAMING_SNAKE_CASE = embed_dim
__SCREAMING_SNAKE_CASE = embed_dim + 1
__SCREAMING_SNAKE_CASE = resolution
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = hidden_sizes
__SCREAMING_SNAKE_CASE = dim
__SCREAMING_SNAKE_CASE = mlp_expansion_ratio
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return EfficientFormerConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=A_ ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,resolution=self.resolution ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,dim=self.dim ,mlp_expansion_ratio=self.mlp_expansion_ratio ,)
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFEfficientFormerModel(config=A_ )
__SCREAMING_SNAKE_CASE = model(A_ ,training=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : int ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = TFEfficientFormerForImageClassification(A_ )
__SCREAMING_SNAKE_CASE = model(A_ ,labels=A_ ,training=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = TFEfficientFormerForImageClassification(A_ )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_tf
class __a ( _UpperCAmelCase, _UpperCAmelCase, unittest.TestCase ):
__UpperCamelCase : Optional[Any] = (
(
TFEfficientFormerModel,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerForImageClassification,
)
if is_tf_available()
else ()
)
__UpperCamelCase : List[Any] = (
{
'''feature-extraction''': TFEfficientFormerModel,
'''image-classification''': (
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
),
}
if is_tf_available()
else {}
)
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Tuple = False
__UpperCamelCase : Union[str, Any] = False
__UpperCamelCase : Optional[Any] = False
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFEfficientFormerModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(
self ,config_class=A_ ,has_text_modality=A_ ,hidden_size=37 )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""EfficientFormer does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="""EfficientFormer does not support input and output embeddings""" )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(A_ )
__SCREAMING_SNAKE_CASE = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,A_ )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
def check_hidden_states_output(lowerCamelCase : Dict ,lowerCamelCase : int ,lowerCamelCase : List[str] ):
__SCREAMING_SNAKE_CASE = model_class(A_ )
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A_ ,A_ ) ,training=A_ )
__SCREAMING_SNAKE_CASE = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
__SCREAMING_SNAKE_CASE = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(A_ ) ,A_ )
if hasattr(self.model_tester ,"""encoder_seq_length""" ):
__SCREAMING_SNAKE_CASE = self.model_tester.encoder_seq_length
if hasattr(self.model_tester ,"""chunk_length""" ) and self.model_tester.chunk_length > 1:
__SCREAMING_SNAKE_CASE = seq_length * self.model_tester.chunk_length
else:
__SCREAMING_SNAKE_CASE = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[seq_length, self.model_tester.hidden_size] ,)
if config.is_encoder_decoder:
__SCREAMING_SNAKE_CASE = outputs.decoder_hidden_states
                self.assertIsInstance(A_ ,(list, tuple) )
self.assertEqual(len(A_ ) ,A_ )
__SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""seq_length""" ,A_ )
__SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""decoder_seq_length""" ,A_ )
self.assertListEqual(
list(hidden_states[-1].shape[-2:] ) ,[decoder_seq_length, self.model_tester.hidden_size] ,)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(A_ ,A_ ,A_ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
check_hidden_states_output(A_ ,A_ ,A_ )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Tuple ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict=False ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "TFEfficientFormerForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
@unittest.skip(reason="""EfficientFormer does not implement masked image modeling yet""" )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*A_ )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*A_ )
@slow
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
for model_name in TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__SCREAMING_SNAKE_CASE = TFEfficientFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""seq_length""" ,A_ )
__SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""encoder_seq_length""" ,A_ )
__SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""key_length""" ,A_ )
__SCREAMING_SNAKE_CASE = getattr(self.model_tester ,"""chunk_length""" ,A_ )
if chunk_length is not None and hasattr(self.model_tester ,"""num_hashes""" ):
__SCREAMING_SNAKE_CASE = encoder_seq_length * self.model_tester.num_hashes
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(A_ )
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A_ ,A_ ) ,training=A_ )
__SCREAMING_SNAKE_CASE = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) ,self.model_tester.num_attention_outputs )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = model_class(A_ )
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(A_ ,A_ ) ,training=A_ )
__SCREAMING_SNAKE_CASE = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(A_ ) ,self.model_tester.num_attention_outputs )
if chunk_length is not None:
self.assertListEqual(
list(attentions[0].shape[-4:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, chunk_length, encoder_key_length] ,)
else:
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length] ,)
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# Prepare our model
__SCREAMING_SNAKE_CASE = model_class(A_ )
# These are maximally general inputs for the model, with multiple None dimensions
# Hopefully this will catch any conditionals that fail for flexible shapes
__SCREAMING_SNAKE_CASE = {
key: tf.keras.Input(shape=val.shape[1:] ,dtype=val.dtype ,name=A_ )
for key, val in model.input_signature.items()
if key in model.dummy_inputs
}
__SCREAMING_SNAKE_CASE = model(A_ )
self.assertTrue(outputs_dict is not None )
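# The standard COCO fixture image shared by the vision model integration tests.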
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_tf
@require_vision
class __a ( unittest.TestCase ):
@cached_property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
return (
EfficientFormerImageProcessor.from_pretrained("""snap-research/efficientformer-l1-300""" )
if is_vision_available()
else None
)
@slow
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFEfficientFormerForImageClassification.from_pretrained("""snap-research/efficientformer-l1-300""" )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=A_ ,return_tensors="""tf""" )
# forward pass
__SCREAMING_SNAKE_CASE = model(**A_ ,training=A_ )
# verify the logits
__SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,A_ )
__SCREAMING_SNAKE_CASE = tf.constant([-0.0_555, 0.4_825, -0.0_852] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,A_ ,atol=1E-4 ) )
@slow
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TFEfficientFormerForImageClassificationWithTeacher.from_pretrained(
"""snap-research/efficientformer-l1-300""" )
__SCREAMING_SNAKE_CASE = self.default_image_processor
__SCREAMING_SNAKE_CASE = prepare_img()
__SCREAMING_SNAKE_CASE = image_processor(images=A_ ,return_tensors="""tf""" )
# forward pass
__SCREAMING_SNAKE_CASE = model(**A_ ,training=A_ )
# verify the logits
__SCREAMING_SNAKE_CASE = tf.TensorShape((1, 1000) )
self.assertEqual(outputs.logits.shape ,A_ )
__SCREAMING_SNAKE_CASE = tf.constant([-0.1_312, 0.4_353, -1.0_499] )
self.assertTrue(np.allclose(outputs.logits[0, :3] ,A_ ,atol=1E-4 ) )
| 704 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : int = 'linear'
__UpperCamelCase : Tuple = 'cosine'
__UpperCamelCase : Tuple = 'cosine_with_restarts'
__UpperCamelCase : List[Any] = 'polynomial'
__UpperCamelCase : Optional[Any] = 'constant'
__UpperCamelCase : Optional[int] = 'constant_with_warmup'
__UpperCamelCase : List[Any] = 'piecewise_constant'
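# Constant schedule: the learning-rate multiplier is always 1.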
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
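# `step_rules` is a comma-separated list of "multiplier:step" pairs followed by a bare
# final multiplier, e.g. "1:10,0.1:20,0.01" -> 1.0 until step 10, 0.1 until 20, then 0.01.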
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = float(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
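# Return the knight moves from `position` that land inside an n x n board.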
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> list[tuple[int, int]]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = position
__SCREAMING_SNAKE_CASE = [
(y + 1, x + 2),
(y - 1, x + 2),
(y + 1, x - 2),
(y - 1, x - 2),
(y + 2, x + 1),
(y + 2, x - 1),
(y - 2, x + 1),
(y - 2, x - 1),
]
__SCREAMING_SNAKE_CASE = []
for position in positions:
__SCREAMING_SNAKE_CASE = position
if 0 <= y_test < n and 0 <= x_test < n:
permissible_positions.append(_lowerCamelCase )
return permissible_positions
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
return not any(elem == 0 for row in board for elem in row )
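# Depth-first backtracking: try each reachable square, number it, recurse, and
# reset it to 0 when the branch fails.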
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> bool:
'''simple docstring'''
if is_complete(_lowerCamelCase ):
return True
for position in get_valid_pos(_lowerCamelCase , len(_lowerCamelCase ) ):
__SCREAMING_SNAKE_CASE = position
if board[y][x] == 0:
__SCREAMING_SNAKE_CASE = curr + 1
if open_knight_tour_helper(_lowerCamelCase , _lowerCamelCase , curr + 1 ):
return True
__SCREAMING_SNAKE_CASE = 0
return False
def __magic_name__ ( __UpperCAmelCase ) -> list[list[int]]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [[0 for i in range(_lowerCamelCase )] for j in range(_lowerCamelCase )]
for i in range(_lowerCamelCase ):
for j in range(_lowerCamelCase ):
__SCREAMING_SNAKE_CASE = 1
if open_knight_tour_helper(_lowerCamelCase , (i, j) , 1 ):
return board
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = f"""Open Kight Tour cannot be performed on a board of size {n}"""
raise ValueError(_lowerCamelCase )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 705 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 | 0 |
'''simple docstring'''
import argparse
import struct
import unittest
class __a :
def __init__( self : List[Any] ,lowerCamelCase : bytes ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = data
# Initialize hash values
__SCREAMING_SNAKE_CASE = [
0x6A_09E_667,
0xBB_67A_E85,
0x3C_6EF_372,
0xA5_4FF_53A,
0x51_0E5_27F,
0x9B_056_88C,
0x1F_83D_9AB,
0x5B_E0C_D19,
]
# Initialize round constants
__SCREAMING_SNAKE_CASE = [
0x42_8A2_F98,
0x71_374_491,
0xB5_C0F_BCF,
0xE9_B5D_BA5,
0x39_56C_25B,
0x59_F11_1F1,
0x92_3F8_2A4,
0xAB_1C5_ED5,
0xD8_07A_A98,
0x12_835_B01,
0x24_318_5BE,
0x55_0C7_DC3,
0x72_BE5_D74,
0x80_DEB_1FE,
0x9B_DC0_6A7,
0xC1_9BF_174,
0xE4_9B6_9C1,
0xEF_BE4_786,
0x0F_C19_DC6,
0x24_0CA_1CC,
0x2D_E92_C6F,
0x4A_748_4AA,
0x5C_B0A_9DC,
0x76_F98_8DA,
0x98_3E5_152,
0xA8_31C_66D,
0xB0_032_7C8,
0xBF_597_FC7,
0xC6_E00_BF3,
0xD5_A79_147,
0x06_CA6_351,
0x14_292_967,
0x27_B70_A85,
0x2E_1B2_138,
0x4D_2C6_DFC,
0x53_380_D13,
0x65_0A7_354,
0x76_6A0_ABB,
0x81_C2C_92E,
0x92_722_C85,
0xA2_BFE_8A1,
0xA8_1A6_64B,
0xC2_4B8_B70,
0xC7_6C5_1A3,
0xD1_92E_819,
0xD6_990_624,
0xF4_0E3_585,
0x10_6AA_070,
0x19_A4C_116,
0x1E_376_C08,
0x27_487_74C,
0x34_B0B_CB5,
0x39_1C0_CB3,
0x4E_D8A_A4A,
0x5B_9CC_A4F,
0x68_2E6_FF3,
0x74_8F8_2EE,
0x78_A56_36F,
0x84_C87_814,
0x8C_C70_208,
0x90_BEF_FFA,
0xA4_506_CEB,
0xBE_F9A_3F7,
0xC6_717_8F2,
]
__SCREAMING_SNAKE_CASE = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase : bytes ):
'''simple docstring'''
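        # FIPS 180-4 padding: a single 0x80 byte, zeros up to 56 bytes mod 64,
        # then the original length in bits as a big-endian 64-bit integer.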
__SCREAMING_SNAKE_CASE = b"""\x80""" + (b"""\x00""" * (63 - (len(__UpperCamelCase ) + 8) % 64))
__SCREAMING_SNAKE_CASE = struct.pack(""">Q""" ,(len(__UpperCamelCase ) * 8) )
return data + padding + big_endian_integer
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [
self.preprocessed_data[x : x + 64]
for x in range(0 ,len(self.preprocessed_data ) ,64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__SCREAMING_SNAKE_CASE = list(struct.unpack(""">16L""" ,__UpperCamelCase ) )
# add 48 0-ed integers
words += [0] * 48
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.hashes
for index in range(0 ,64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__SCREAMING_SNAKE_CASE = (
self.ror(words[index - 15] ,7 )
^ self.ror(words[index - 15] ,18 )
^ (words[index - 15] >> 3)
)
__SCREAMING_SNAKE_CASE = (
self.ror(words[index - 2] ,17 )
^ self.ror(words[index - 2] ,19 )
^ (words[index - 2] >> 10)
)
__SCREAMING_SNAKE_CASE = (
words[index - 16] + sa + words[index - 7] + sa
) % 0x100_000_000
# Compression
__SCREAMING_SNAKE_CASE = self.ror(__UpperCamelCase ,6 ) ^ self.ror(__UpperCamelCase ,11 ) ^ self.ror(__UpperCamelCase ,25 )
__SCREAMING_SNAKE_CASE = (e & f) ^ ((~e & 0xFF_FFF_FFF) & g)
__SCREAMING_SNAKE_CASE = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100_000_000
__SCREAMING_SNAKE_CASE = self.ror(__UpperCamelCase ,2 ) ^ self.ror(__UpperCamelCase ,13 ) ^ self.ror(__UpperCamelCase ,22 )
__SCREAMING_SNAKE_CASE = (a & b) ^ (a & c) ^ (b & c)
__SCREAMING_SNAKE_CASE = (sa + maj) % 0x100_000_000
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
g,
f,
e,
((d + tempa) % 0x100_000_000),
c,
b,
a,
((tempa + tempa) % 0x100_000_000),
)
__SCREAMING_SNAKE_CASE = [a, b, c, d, e, f, g, h]
# Modify final values
__SCREAMING_SNAKE_CASE = [
((element + mutated_hash_values[index]) % 0x100_000_000)
for index, element in enumerate(self.hashes )
]
__SCREAMING_SNAKE_CASE = """""".join([hex(__UpperCamelCase )[2:].zfill(8 ) for value in self.hashes] )
def UpperCAmelCase__ ( self : str ,lowerCamelCase : int ,lowerCamelCase : int ):
'''simple docstring'''
return 0xFF_FFF_FFF & (value << (32 - rotations)) | (value >> rotations)
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
import hashlib
__SCREAMING_SNAKE_CASE = bytes("""Test String""" ,"""utf-8""" )
self.assertEqual(SHAaaa(__UpperCamelCase ).hash ,hashlib.shaaaa(__UpperCamelCase ).hexdigest() )
def __magic_name__ ( ) -> Any:
'''simple docstring'''
import doctest
doctest.testmod()
__SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"""-s""" , """--string""" , dest="""input_string""" , default="""Hello World!! Welcome to Cryptography""" , help="""Hash the string""" , )
parser.add_argument(
"""-f""" , """--file""" , dest="""input_file""" , help="""Hash contents of a file""" )
__SCREAMING_SNAKE_CASE = parser.parse_args()
__SCREAMING_SNAKE_CASE = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
else:
__SCREAMING_SNAKE_CASE = bytes(_SCREAMING_SNAKE_CASE , """utf-8""" )
print(SHAaaa(_SCREAMING_SNAKE_CASE ).hash )
if __name__ == "__main__":
main()
| 706 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" )
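    # NOTE: this CSS class is scraped from Yahoo Finance's markup and can break
    # whenever the page layout changes.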
__SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 13 | 0 |
'''simple docstring'''
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
a = abspath(join(dirname(dirname(dirname(__file__))), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
from transformers.testing_utils import pytest_addoption_shared
pytest_addoption_shared(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
from transformers.testing_utils import pytest_terminal_summary_main
__SCREAMING_SNAKE_CASE = terminalreporter.config.getoption("""--make-reports""" )
if make_reports:
pytest_terminal_summary_main(__UpperCAmelCase , id=__UpperCAmelCase )
| 707 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if num < 0:
return False
__SCREAMING_SNAKE_CASE = num
__SCREAMING_SNAKE_CASE = 0
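    # Peel off digits to build the reversal; a palindrome equals its own reverse.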
while num > 0:
__SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 0 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print("Googling.....")
a = "https://www.google.com/search?q=" + " ".join(sys.argv[1:])
a = requests.get(url, headers={"UserAgent": UserAgent().random})
# res.raise_for_status()
with open("project1a.html", "wb") as out_file: # only for knowing the class
for data in res.iter_content(10000):
out_file.write(data)
a = BeautifulSoup(res.text, "html.parser")
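    # ".eZt8xd" is a Google results CSS class; like any scraped selector it may
    # change without notice.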
a = list(soup.select(".eZt8xd"))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get("href"))
else:
webbrowser.open(F'''https://google.com{link.get("href")}''')
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a = list[list[float | int]]
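# Solve the augmented [matrix | vector] system by Gaussian elimination with
# partial pivoting, followed by back substitution.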
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for row in range(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = matrix[row][col]
__SCREAMING_SNAKE_CASE = vector[row][0]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while row < size and col < size:
# pivoting
__SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col]
__SCREAMING_SNAKE_CASE = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __UpperCAmelCase ):
for row in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col]
for cola in range(__UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase )
]
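# Fit a degree-(k-1) polynomial through the points (1, y_1)..(k, y_k) by solving
# the Vandermonde system, and return it as a callable.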
def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for x_val, y_val in enumerate(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1)
__SCREAMING_SNAKE_CASE = y_val
__SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase )
def interpolated_func(__UpperCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__UpperCAmelCase ) )
return interpolated_func
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
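# Project Euler 101: sum the first incorrect term (FIT) of each optimum
# polynomial OP(k, n) fitted to the first k terms of the sequence.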
def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )]
__SCREAMING_SNAKE_CASE = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for poly in polynomials:
__SCREAMING_SNAKE_CASE = 1
while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ):
x_val += 1
ret += poly(__UpperCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import math
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__UpperCAmelCase ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
a = [num for num in range(3, 100001, 2) if not is_prime(num)]
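# Goldbach's other conjecture: search for odd composites that cannot be written
# as a prime plus twice a square.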
def __magic_name__ ( __UpperCAmelCase ) -> list[int]:
'''simple docstring'''
if not isinstance(__UpperCAmelCase , __UpperCAmelCase ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
__SCREAMING_SNAKE_CASE = []
for num in range(len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = 0
while 2 * i * i <= odd_composites[num]:
__SCREAMING_SNAKE_CASE = odd_composites[num] - 2 * i * i
if is_prime(__UpperCAmelCase ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__UpperCAmelCase ) == n:
return list_nums
return []
def __magic_name__ ( ) -> int:
'''simple docstring'''
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F'''{solution() = }''')
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class __a ( _snake_case ):
def __init__( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(lowerCamelCase ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
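        # Turn the image-text similarity logits into per-label probabilities and
        # sort the labels by descending score.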
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda lowerCamelCase : -lowerCamelCase[0] )
]
return result
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import time
a = list[tuple[int, int]]
a = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's are free cells whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
a = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class __a :
def __init__( self : Dict ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : Node | None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pos_x
__SCREAMING_SNAKE_CASE = pos_y
__SCREAMING_SNAKE_CASE = (pos_y, pos_x)
__SCREAMING_SNAKE_CASE = goal_x
__SCREAMING_SNAKE_CASE = goal_y
__SCREAMING_SNAKE_CASE = parent
class __a :
def __init__( self : Optional[int] ,lowerCamelCase : tuple[int, int] ,lowerCamelCase : tuple[int, int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Node(start[1] ,start[0] ,goal[1] ,goal[0] ,UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = Node(goal[1] ,goal[0] ,goal[1] ,goal[0] ,UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = [self.start]
__SCREAMING_SNAKE_CASE = False
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
while self.node_queue:
__SCREAMING_SNAKE_CASE = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
__SCREAMING_SNAKE_CASE = True
return self.retrace_path(UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = self.get_successors(UpperCamelCase__ )
for node in successors:
self.node_queue.append(UpperCamelCase__ )
if not self.reached:
return [self.start.pos]
return None
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Node ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
for action in delta:
__SCREAMING_SNAKE_CASE = parent.pos_x + action[1]
__SCREAMING_SNAKE_CASE = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(UpperCamelCase__ ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(UpperCamelCase__ ,UpperCamelCase__ ,self.target.pos_y ,self.target.pos_x ,UpperCamelCase__ ) )
return successors
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Node | None ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = node
__SCREAMING_SNAKE_CASE = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
__SCREAMING_SNAKE_CASE = current_node.parent
path.reverse()
return path
class __a :
def __init__( self : str ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BreadthFirstSearch(UpperCamelCase__ ,UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = BreadthFirstSearch(UpperCamelCase__ ,UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = False
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
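        # Expand one node from each frontier per iteration and stop as soon as
        # the nodes popped from the two frontiers coincide.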
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
__SCREAMING_SNAKE_CASE = self.fwd_bfs.node_queue.pop(0 )
__SCREAMING_SNAKE_CASE = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
__SCREAMING_SNAKE_CASE = True
return self.retrace_bidirectional_path(
UpperCamelCase__ ,UpperCamelCase__ )
__SCREAMING_SNAKE_CASE = current_bwd_node
__SCREAMING_SNAKE_CASE = current_fwd_node
__SCREAMING_SNAKE_CASE = {
self.fwd_bfs: self.fwd_bfs.get_successors(UpperCamelCase__ ),
self.bwd_bfs: self.bwd_bfs.get_successors(UpperCamelCase__ ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(UpperCamelCase__ )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
    def retrace_bidirectional_path( self ,fwd_node : Node ,bwd_node : Node ):
        '''simple docstring'''
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
a = (0, 0)
a = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
a = time.time()
a = BreadthFirstSearch(init, goal)
a = bfs.search()
a = time.time() - start_bfs_time
print("Unidirectional BFS computation time : ", bfs_time)
a = time.time()
a = BidirectionalBreadthFirstSearch(init, goal)
a = bd_bfs.search()
a = time.time() - start_bd_bfs_time
print("Bidirectional BFS computation time : ", bd_bfs_time)
| 710 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check ( pkg , hint=None ) -> Optional[Any]:
    '''simple docstring'''
    require_version(deps[pkg] , hint )
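# Usage sketch (hypothetical call): verify a single pinned dependency on demand,
# with a custom hint appended to the error message if the pin in `deps` is violated.
#
#     dep_version_check("tqdm", hint="pip install -U tqdm")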
| 13 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
    Wav2Vec2Config,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2ForCTC,
    Wav2Vec2ForPreTraining,
    Wav2Vec2Processor,
    logging,
)
from transformers.models.wav2vec2.modeling_wav2vec2 import Wav2Vec2ForSequenceClassification
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
TOP_LEVEL_KEYS = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def read_txt_into_dict ( filename ) -> Optional[int]:
    '''simple docstring'''
    result = {}
    with open(filename , """r""" ) as file:
        for line_number, line in enumerate(file ):
            line = line.strip()
            if line:
                words = line.split()
                key = line_number
                value = words[0]
                result[key] = value
    return result
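# Assumed input format for the parser above: one label per line, where the first
# whitespace-separated token becomes the label for that line's index; e.g. a
# file containing "down\ngo\nleft" yields {0: "down", 1: "go", 2: "left"}.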
def set_recursively ( hf_pointer , key , value , full_name , weight_type ) -> Union[str, Any]:
    '''simple docstring'''
    for attribute in key.split(""".""" ):
        hf_pointer = getattr(hf_pointer , attribute )
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            weight_type = """param"""
    if weight_type is not None and weight_type != "param":
        hf_shape = getattr(hf_pointer , weight_type ).shape
    elif weight_type is not None and weight_type == "param":
        shape_pointer = hf_pointer
        for attribute in hf_param_name.split(""".""" ):
            shape_pointer = getattr(shape_pointer , attribute )
        hf_shape = shape_pointer.shape
        # let's reduce dimension
        value = value[0]
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
            f""" {value.shape} for {full_name}""" )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "param":
        for attribute in hf_param_name.split(""".""" ):
            hf_pointer = getattr(hf_pointer , attribute )
        hf_pointer.data = value
    else:
        hf_pointer.data = value
    logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def rename_dict ( key , value , full_name , weight_type , hf_dict ) -> Any:
    '''simple docstring'''
    hf_param_name = None
    for param_key in PARAM_MAPPING.keys():
        if full_name.endswith(param_key ):
            hf_param_name = PARAM_MAPPING[full_name.split(""".""" )[-1]]
            weight_type = """param"""
    if weight_type is not None and weight_type != "param":
        full_key = """.""".join([key, weight_type] )
    elif weight_type is not None and weight_type == "param":
        full_key = """.""".join([key, hf_param_name] )
    else:
        full_key = key
    hf_dict[full_key] = value if """lm_head""" in full_key else value[0]
a = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
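# PARAM_MAPPING translates fairseq adapter parameter names (keys) into the
# attribute paths of the corresponding HF adapter-layer parameters (values);
# the loaders above switch to weight_type="param" whenever a checkpoint key
# ends in one of these names.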
def load_wav2vec2_layer ( name , value , hf_model=None , hf_dict=None ) -> Optional[Any]:
    '''simple docstring'''
    is_used = False
    for key, mapped_key in MAPPING.items():
        mapped_key = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
        if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
            is_used = True
            if "*" in mapped_key:
                layer_index = name.split(key )[0].split(""".""" )[-2]
                mapped_key = mapped_key.replace("""*""" , layer_index )
            if "weight_g" in name:
                weight_type = """weight_g"""
            elif "weight_v" in name:
                weight_type = """weight_v"""
            elif "bias" in name:
                weight_type = """bias"""
            elif "weight" in name:
                # TODO: don't match quantizer.weight_proj
                weight_type = """weight"""
            else:
                weight_type = None
            if hf_dict is not None:
                rename_dict(mapped_key , value , name , weight_type , hf_dict )
            else:
                set_recursively(hf_model , mapped_key , value , name , weight_type )
            return is_used
    return is_used
def recursively_load_weights ( fairseq_model , hf_model , is_headless ) -> int:
    '''simple docstring'''
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.wav2vec2.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == """group""" , )
            is_used = True
        else:
            is_used = load_wav2vec2_layer(name , value , hf_model )
        if not is_used:
            unused_weights.append(name )
    logger.warning(f"""Unused weights: {unused_weights}""" )
def load_conv_layer ( full_name , value , feature_extractor , unused_weights , use_group_norm ) -> Dict:
    '''simple docstring'''
    name = full_name.split("""conv_layers.""" )[-1]
    items = name.split(""".""" )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"""{full_name} has size {value.shape}, but"""
                    f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_wav2vec2_checkpoint ( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True , is_seq_class=False ) -> Dict:
    '''simple docstring'''
    if config_path is not None:
        config = Wav2Vec2Config.from_pretrained(config_path )
    else:
        config = Wav2Vec2Config()
    if is_seq_class:
        id2label = read_txt_into_dict(dict_path )
        config.id2label = id2label
        hf_wav2vec = Wav2Vec2ForSequenceClassification(config )
        feature_extractor = Wav2Vec2FeatureExtractor(
            feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=True , )
        feature_extractor.save_pretrained(pytorch_dump_folder_path )
    elif is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path )
            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols )
            vocab_path = os.path.join(pytorch_dump_folder_path , """vocab.json""" )
            if not os.path.isdir(pytorch_dump_folder_path ):
                logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(pytorch_dump_folder_path ) )
                return
            os.makedirs(pytorch_dump_folder_path , exist_ok=True )
            vocab_dict = target_dict.indices
            # fairseq has the <pad> and <s> switched
            vocab_dict["""<pad>"""] = 0
            vocab_dict["""<s>"""] = 1
            with open(vocab_path , """w""" , encoding="""utf-8""" ) as vocab_handle:
                json.dump(vocab_dict , vocab_handle )
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=False , )
            return_attention_mask = True if config.feat_extract_norm == """layer""" else False
            feature_extractor = Wav2Vec2FeatureExtractor(
                feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=True , return_attention_mask=return_attention_mask , )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor , tokenizer=tokenizer )
            processor.save_pretrained(pytorch_dump_folder_path )
        hf_wav2vec = Wav2Vec2ForCTC(config )
    else:
        hf_wav2vec = Wav2Vec2ForPreTraining(config )
    if is_finetuned or is_seq_class:
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
    else:
        task_arg = argparse.Namespace(task="""audio_pretraining""" )
        task = fairseq.tasks.setup_task(task_arg )
        model , _ , _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=task )
    model = model[0].eval()
    recursively_load_weights(model , hf_wav2vec , not is_finetuned )
    hf_wav2vec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
    args = parser.parse_args()

    is_finetuned = not args.not_finetuned and not args.is_seq_class
    convert_wav2vec2_checkpoint(
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.dict_path,
        is_finetuned,
        args.is_seq_class,
    )
)
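# Example invocation (script filename and paths are illustrative placeholders):
#
#     python convert_wav2vec2_checkpoint.py \
#         --checkpoint_path /path/to/wav2vec_small.pt \
#         --pytorch_dump_folder_path ./wav2vec2-base \
#         --not_finetuned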
| 711 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
logger = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    dataset_name: Optional[str] = field(
        default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
    dataset_config_name: Optional[str] = field(
        default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
    max_seq_length: int = field(
        default=1024, metadata={
            'help': (
                'The maximum total input sequence length after tokenization. Sequences longer '
                'than this will be truncated, sequences shorter will be padded.'
            )
        }, )
    overwrite_cache: bool = field(
        default=False, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
    pad_to_max_length: bool = field(
        default=False, metadata={
            'help': (
                'Whether to pad all samples to `max_seq_length`. '
                'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
            )
        }, )
    max_train_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of training examples to this '
                'value if set.'
            )
        }, )
    max_eval_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
                'value if set.'
            )
        }, )
    max_predict_samples: Optional[int] = field(
        default=None, metadata={
            'help': (
                'For debugging purposes or quicker training, truncate the number of prediction examples to this '
                'value if set.'
            )
        }, )
    train_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the training data.'} )
    validation_file: Optional[str] = field(
        default=None, metadata={'help': 'A csv or a json file containing the validation data.'} )
    test_file: Optional[str] = field(default=None, metadata={'help': 'A csv or a json file containing the test data.'} )
    def __post_init__( self ):
        '''simple docstring'''
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
        else:
            train_extension = self.train_file.split(""".""" )[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(""".""" )[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    model_name_or_path: str = field(
        metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
    config_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
    cache_dir: Optional[str] = field(
        default=None, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
    use_fast_tokenizer: bool = field(
        default=True, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
    model_revision: str = field(
        default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
    use_auth_token: bool = field(
        default=False, metadata={
            'help': (
                'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
                'with private models).'
            )
        }, )
def main() -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1]
__SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__SCREAMING_SNAKE_CASE = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__SCREAMING_SNAKE_CASE = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1}
__SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__UpperCAmelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__SCREAMING_SNAKE_CASE = examples["""statement"""]
__SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE = raw_datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 )
else:
__SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` columns because it contains -1 and Trainer won't like that.
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
__SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
__SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def _mp_fn ( index ) -> Any:
    '''simple docstring'''
    # For xla_spawn (TPUs): `index` is the process index and is unused here.
    main()


if __name__ == "__main__":
    main()
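# Example invocation (hypothetical paths/model; the flags come from the
# HfArgumentParser dataclasses above plus the standard TrainingArguments):
#
#     python run_tabfact.py \
#         --model_name_or_path microsoft/tapex-base \
#         --dataset_name tab_fact \
#         --do_train --do_eval \
#         --output_dir ./tapex-tabfact-out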
| 13 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import AlignProcessor, EfficientNetImageProcessor
@require_vision
class __a ( unittest.TestCase ):
    def setUp( self ):
        '''simple docstring'''
        self.tmpdirname = tempfile.mkdtemp()
        vocab_tokens = [
'''[UNK]''',
'''[CLS]''',
'''[SEP]''',
'''[PAD]''',
'''[MASK]''',
'''want''',
'''##want''',
'''##ed''',
'''wa''',
'''un''',
'''runn''',
'''##ing''',
''',''',
'''low''',
'''lowest''',
]
        self.vocab_file = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
        with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
        image_processor_map = {
'''do_resize''': True,
'''size''': 20,
'''do_center_crop''': True,
'''crop_size''': 18,
'''do_normalize''': True,
'''image_mean''': [0.48_145_466, 0.4_578_275, 0.40_821_073],
'''image_std''': [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
        self.image_processor_file = os.path.join(self.tmpdirname ,IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map ,fp )
    def get_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        return BertTokenizer.from_pretrained(self.tmpdirname ,**kwargs )
    def get_rust_tokenizer( self ,**kwargs ):
        '''simple docstring'''
        return BertTokenizerFast.from_pretrained(self.tmpdirname ,**kwargs )
    def get_image_processor( self ,**kwargs ):
        '''simple docstring'''
        return EfficientNetImageProcessor.from_pretrained(self.tmpdirname ,**kwargs )
    def tearDown( self ):
        '''simple docstring'''
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        '''simple docstring'''
        image_inputs = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x ,0 ,-1 ) ) for x in image_inputs]
        return image_inputs
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_UpperCAmelCase ,image_processor=_UpperCAmelCase )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = AlignProcessor.from_pretrained(self.tmpdirname ,use_fast=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_UpperCAmelCase ,image_processor=_UpperCAmelCase )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = AlignProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,_UpperCAmelCase )
self.assertIsInstance(processor_fast.tokenizer ,_UpperCAmelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,_UpperCAmelCase )
self.assertIsInstance(processor_fast.image_processor ,_UpperCAmelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=_UpperCAmelCase ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE = AlignProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=_UpperCAmelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,_UpperCAmelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,_UpperCAmelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_UpperCAmelCase ,image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(_UpperCAmelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=_UpperCAmelCase ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_UpperCAmelCase ,image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = '''lower newer'''
__SCREAMING_SNAKE_CASE = processor(text=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = tokenizer(_UpperCAmelCase ,padding="""max_length""" ,max_length=64 )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_UpperCAmelCase ,image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = '''lower newer'''
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=_UpperCAmelCase ,images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(_UpperCAmelCase ):
processor()
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_UpperCAmelCase ,image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(_UpperCAmelCase )
self.assertListEqual(_UpperCAmelCase ,_UpperCAmelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = AlignProcessor(tokenizer=_UpperCAmelCase ,image_processor=_UpperCAmelCase )
__SCREAMING_SNAKE_CASE = '''lower newer'''
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=_UpperCAmelCase ,images=_UpperCAmelCase )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 712 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
    from .multicontrolnet import MultiControlNetModel
    from .pipeline_controlnet import StableDiffusionControlNetPipeline
    from .pipeline_controlnet_img2img import StableDiffusionControlNetImg2ImgPipeline
    from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13 | 0 |
'''simple docstring'''
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset ( dataset , expected_features ) -> Tuple:
    '''simple docstring'''
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ ).read()
_check_json_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_json_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
__SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
__SCREAMING_SNAKE_CASE = features.copy()
__SCREAMING_SNAKE_CASE = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCAmelCase__ , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ , split=UpperCAmelCase__ ).read()
_check_json_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
if issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = jsonl_path
elif issubclass(UpperCAmelCase__ , UpperCAmelCase__ ):
__SCREAMING_SNAKE_CASE = [jsonl_path]
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_json_dataset(UpperCAmelCase__ , UpperCAmelCase__ )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=("train",) ) -> Union[str, Any]:
'''simple docstring'''
assert isinstance(UpperCAmelCase__ , UpperCAmelCase__ )
for split in splits:
__SCREAMING_SNAKE_CASE = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
__SCREAMING_SNAKE_CASE = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=UpperCAmelCase__ , keep_in_memory=UpperCAmelCase__ ).read()
_check_json_datasetdict(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__SCREAMING_SNAKE_CASE = features.copy() if features else default_expected_features
__SCREAMING_SNAKE_CASE = (
Features({feature: Value(UpperCAmelCase__ ) for feature, dtype in features.items()} ) if features is not None else None
)
__SCREAMING_SNAKE_CASE = JsonDatasetReader({"""train""": jsonl_path} , features=UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_json_datasetdict(UpperCAmelCase__ , UpperCAmelCase__ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
if split:
__SCREAMING_SNAKE_CASE = {split: jsonl_path}
else:
__SCREAMING_SNAKE_CASE = """train"""
__SCREAMING_SNAKE_CASE = {"""train""": jsonl_path, """test""": jsonl_path}
__SCREAMING_SNAKE_CASE = tmp_path / """cache"""
__SCREAMING_SNAKE_CASE = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
__SCREAMING_SNAKE_CASE = JsonDatasetReader(UpperCAmelCase__ , cache_dir=UpperCAmelCase__ ).read()
_check_json_datasetdict(UpperCAmelCase__ , UpperCAmelCase__ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json ( buffer ) -> Tuple:
    '''simple docstring'''
    return json.load(buffer )
def load_json_lines ( buffer ) -> Union[str, Any]:
    '''simple docstring'''
    return [json.loads(line ) for line in buffer]
class __a :
@pytest.mark.parametrize("""lines, load_json_function""" ,[(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Optional[int] ,lowerCamelCase : Any ,lowerCamelCase : Dict ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A ,_A ,lines=_A ).write()
buffer.seek(0 )
__SCREAMING_SNAKE_CASE = load_json_function(_A )
assert isinstance(_A ,_A )
assert isinstance(exported_content[0] ,_A )
assert len(_A ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" ,[
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] ,)
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Dict ,lowerCamelCase : Dict ,lowerCamelCase : List[str] ,lowerCamelCase : Tuple ,lowerCamelCase : List[str] ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A ,_A ,lines=_A ,orient=_A ).write()
buffer.seek(0 )
__SCREAMING_SNAKE_CASE = load_json(_A )
assert isinstance(_A ,_A )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_A ,"""keys""" ) and not hasattr(exported_content[0] ,"""keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_A ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" ,[(True, load_json_lines), (False, load_json)] )
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : int ,lowerCamelCase : Union[str, Any] ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A ,_A ,lines=_A ,num_proc=2 ).write()
buffer.seek(0 )
__SCREAMING_SNAKE_CASE = load_json_function(_A )
assert isinstance(_A ,_A )
assert isinstance(exported_content[0] ,_A )
assert len(_A ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" ,[
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] ,)
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Dict ,lowerCamelCase : str ,lowerCamelCase : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(_A ,_A ,lines=_A ,orient=_A ,num_proc=2 ).write()
buffer.seek(0 )
__SCREAMING_SNAKE_CASE = load_json(_A )
assert isinstance(_A ,_A )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(_A ,"""keys""" ) and not hasattr(exported_content[0] ,"""keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(_A ) == 10
def UpperCAmelCase__ ( self : str ,lowerCamelCase : Dict ):
'''simple docstring'''
with pytest.raises(_A ):
with io.BytesIO() as buffer:
JsonDatasetWriter(_A ,_A ,num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" ,[("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def UpperCAmelCase__ ( self : str ,lowerCamelCase : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}"""
__SCREAMING_SNAKE_CASE = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(_A ,_A ,compression=_A ).write()
with fsspec.open(_A ,"""rb""" ,compression="""infer""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
with fsspec.open(_A ,"""rb""" ,compression="""infer""" ) as f:
__SCREAMING_SNAKE_CASE = f.read()
assert exported_content == original_content
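# Quick illustration (hypothetical) of the writer API exercised by the tests above:
#
#     ds = Dataset.from_dict({"col_1": ["a", "b"], "col_2": [1, 2]})
#     with io.BytesIO() as buffer:
#         JsonDatasetWriter(ds, buffer, lines=True).write()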
| 713 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def get_citation ( base_url , params ) -> str:
    '''simple docstring'''
    soup = BeautifulSoup(requests.get(base_url , params=params ).content , """html.parser""" )
    div = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
    anchors = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
    return anchors[2].get_text()
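# The third anchor inside Google Scholar's "gs_fl" footer div is the
# "Cited by N" link, so its text is the citation count for the looked-up paper.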
if __name__ == "__main__":
    params = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 13 | 0 |
from ..utils import is_flax_available, is_torch_available
if is_torch_available():
    from .autoencoder_kl import AutoencoderKL
    from .controlnet import ControlNetModel
    from .dual_transformer_2d import DualTransformer2DModel
    from .modeling_utils import ModelMixin
    from .prior_transformer import PriorTransformer
    from .t5_film_transformer import T5FilmDecoder
    from .transformer_2d import Transformer2DModel
    from .unet_1d import UNet1DModel
    from .unet_2d import UNet2DModel
    from .unet_2d_condition import UNet2DConditionModel
    from .unet_3d_condition import UNet3DConditionModel
    from .vq_model import VQModel
if is_flax_available():
from .controlnet_flax import FlaxControlNetModel
    from .unet_2d_condition_flax import FlaxUNet2DConditionModel
from .vae_flax import FlaxAutoencoderKL
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
CAMEMBERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class CamembertConfig( PretrainedConfig ):
    model_type = 'camembert'
    def __init__( self ,vocab_size=3_0522 ,hidden_size=768 ,num_hidden_layers=12 ,num_attention_heads=12 ,intermediate_size=3072 ,hidden_act="gelu" ,hidden_dropout_prob=0.1 ,attention_probs_dropout_prob=0.1 ,max_position_embeddings=512 ,type_vocab_size=2 ,initializer_range=0.02 ,layer_norm_eps=1E-1_2 ,pad_token_id=1 ,bos_token_id=0 ,eos_token_id=2 ,position_embedding_type="absolute" ,use_cache=True ,classifier_dropout=None ,**kwargs ,):
        '''simple docstring'''
        super().__init__(pad_token_id=pad_token_id ,bos_token_id=bos_token_id ,eos_token_id=eos_token_id ,**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class CamembertOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
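

# Hedged usage sketch: instantiate the config with one overridden default and
# inspect the ONNX input spec. Nothing is downloaded; values printed are the
# defaults defined above.
if __name__ == "__main__":
    config = CamembertConfig(num_hidden_layers=6)
    print(config.vocab_size, config.hidden_size)  # 30522 768
    onnx_config = CamembertOnnxConfig(config, task="default")
    print(dict(onnx_config.inputs))  # input_ids/attention_mask with dynamic batch & sequence axes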
| 13 | 0 |
from ...utils import is_note_seq_available, is_transformers_available, is_torch_available
from ...utils import OptionalDependencyNotAvailable
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
from .pipeline_spectrogram_diffusion import (
SpectrogramContEncoder,
SpectrogramDiffusionPipeline,
        T5FilmDecoder,
)
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
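

# Hedged sketch of the guard pattern above (class name hypothetical): the dummy
# objects imported in the except-branch keep `from ... import X` working even
# when an optional backend is missing, deferring the failure to use time.
class _DummyWhenBackendMissing:
    def __init__(self, *args, **kwargs):
        raise ImportError("This object requires `transformers` and `torch` to be installed.")


# With the real guard, importing SpectrogramDiffusionPipeline always succeeds;
# only constructing it raises when the dependencies are absent.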
| 715 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class FlaxViTModelTester(unittest.TestCase):
    def __init__(self, parent, batch_size=13, image_size=30, patch_size=2, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, type_sequence_label_size=10, initializer_range=0.02):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range

        # in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        config = ViTConfig(image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range)

        return config, pixel_values
    def create_and_check_model(self, config, pixel_values):
        model = FlaxViTModel(config=config)
        result = model(pixel_values)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
        image_size = (self.image_size, self.image_size)
        patch_size = (self.patch_size, self.patch_size)
        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, num_patches + 1, self.hidden_size))
    def create_and_check_for_image_classification(self, config, pixel_values):
        config.num_labels = self.type_sequence_label_size
        model = FlaxViTForImageClassification(config=config)
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FlaxViTForImageClassification(config)
        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (config, pixel_values) = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_flax
class FlaxViTModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()

    def setUp(self) -> None:
        self.model_tester = FlaxViTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ViTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.__call__)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_jit_compilation(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def model_jitted(pixel_values, **kwargs):
                    return model(pixel_values=pixel_values, **kwargs)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = model_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)
    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/vit-base-patch16-224")
            outputs = model(np.ones((1, 3, 224, 224)))
            self.assertIsNotNone(outputs)
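

# Hedged usage sketch outside the test harness: shapes follow the tester defaults
# above (30x30 images, 2x2 patches -> 225 patches + [CLS] = 226 tokens). Weights
# are randomly initialized, so only the output shape is checked.
if __name__ == "__main__":
    config = ViTConfig(
        image_size=30, patch_size=2, num_channels=3, hidden_size=32,
        num_hidden_layers=5, num_attention_heads=4, intermediate_size=37,
    )
    model = FlaxViTModel(config)
    pixel_values = np.ones((1, 3, 30, 30), dtype=np.float32)  # NCHW, as in the slow test above
    outputs = model(pixel_values)
    print(outputs.last_hidden_state.shape)  # (1, 226, 32)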
| 13 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
# rely on isort to merge the imports
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_informer": [
        "INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "InformerConfig",
    ],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_informer"] = [
        "INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
        "InformerForPrediction",
        "InformerModel",
        "InformerPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_informer import INFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, InformerConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_informer import (
INFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
InformerForPrediction,
InformerModel,
InformerPreTrainedModel,
)
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
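

# Hedged illustration of what `_LazyModule` buys (standard transformers layout
# assumed): importing the package module is cheap, and the submodule import only
# happens when an attribute such as `InformerConfig` is first accessed.
if __name__ == "__main__":
    from transformers.models import informer  # no heavy torch import triggered yet

    config_cls = informer.InformerConfig  # configuration_informer is imported on this access
    print(config_cls.model_type)  # informer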
| 716 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class SEWConfig(PretrainedConfig):
    model_type = "sew"

    def __init__(self, vocab_size=32, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, squeeze_factor=2, hidden_act="gelu", hidden_dropout=0.1, activation_dropout=0.1, attention_dropout=0.1, feat_proj_dropout=0.0, final_dropout=0.1, layerdrop=0.1, initializer_range=0.02, layer_norm_eps=1e-5, feat_extract_norm="group", feat_extract_activation="gelu", conv_dim=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512), conv_stride=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), conv_kernel=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1), conv_bias=False, num_conv_pos_embeddings=128, num_conv_pos_embedding_groups=16, apply_spec_augment=True, mask_time_prob=0.05, mask_time_length=10, mask_time_min_masks=2, mask_feature_prob=0.0, mask_feature_length=10, mask_feature_min_masks=0, ctc_loss_reduction="mean", ctc_zero_infinity=False, use_weighted_layer_sum=False, classifier_proj_size=256, pad_token_id=0, bos_token_id=1, eos_token_id=2, **kwargs):
'''simple docstring'''
        super().__init__(**kwargs, pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id)
        self.hidden_size = hidden_size
        self.feat_extract_norm = feat_extract_norm
        self.feat_extract_activation = feat_extract_activation
        self.conv_dim = list(conv_dim)
        self.conv_stride = list(conv_stride)
        self.conv_kernel = list(conv_kernel)
        self.conv_bias = conv_bias
        self.num_conv_pos_embeddings = num_conv_pos_embeddings
        self.num_conv_pos_embedding_groups = num_conv_pos_embedding_groups
        self.num_feat_extract_layers = len(self.conv_dim)
        self.num_hidden_layers = num_hidden_layers
        self.intermediate_size = intermediate_size
        self.squeeze_factor = squeeze_factor
        self.hidden_act = hidden_act
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.feat_proj_dropout = feat_proj_dropout
        self.final_dropout = final_dropout
        self.layerdrop = layerdrop
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.vocab_size = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks

        # ctc loss
        self.ctc_loss_reduction = ctc_loss_reduction
        self.ctc_zero_infinity = ctc_zero_infinity

        # sequence classification
        self.use_weighted_layer_sum = use_weighted_layer_sum
        self.classifier_proj_size = classifier_proj_size
    @property
    def inputs_to_logits_ratio(self):
        return functools.reduce(operator.mul, self.conv_stride, 1)
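

# Hedged sanity check: with the default conv_stride (5, 2, 1, 2, 1, ...), the
# feature extractor downsamples the waveform by 5 * 2**6 = 320, i.e. one logit
# frame per 320 input samples.
if __name__ == "__main__":
    config = SEWConfig()
    print(config.inputs_to_logits_ratio)  # 320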
| 13 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"facebook/xlm-roberta-xl": "https://huggingface.co/facebook/xlm-roberta-xl/resolve/main/config.json",
"facebook/xlm-roberta-xxl": "https://huggingface.co/facebook/xlm-roberta-xxl/resolve/main/config.json",
# See all XLM-RoBERTa-XL models at https://huggingface.co/models?filter=xlm-roberta-xl
}
class XLMRobertaXLConfig(PretrainedConfig):
    model_type = "xlm-roberta-xl"

    def __init__(self, vocab_size=250880, hidden_size=2560, num_hidden_layers=36, num_attention_heads=32, intermediate_size=10240, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=514, type_vocab_size=1, initializer_range=0.02, layer_norm_eps=1e-05, pad_token_id=1, bos_token_id=0, eos_token_id=2, position_embedding_type="absolute", use_cache=True, classifier_dropout=None, **kwargs):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class XLMRobertaXLOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
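

# Hedged usage sketch: the mapping above is what `transformers.onnx` feeds to
# the exporter as dynamic axes. For the multiple-choice task a third "choice"
# axis appears.
if __name__ == "__main__":
    onnx_config = XLMRobertaXLOnnxConfig(XLMRobertaXLConfig(), task="multiple-choice")
    print(dict(onnx_config.inputs))
    # {'input_ids': {0: 'batch', 1: 'choice', 2: 'sequence'}, 'attention_mask': {...}}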
| 717 |
'''simple docstring'''
def solution(numerator: int = 1, digit: int = 1000) -> int:
    """
    Find the value of d < `digit` for which 1/d contains the longest recurring
    decimal cycle, scanning denominators upward from `numerator`.

    >>> solution(1, 10)
    7
    """
    the_digit = 1
    longest_list_length = 0

    for divide_by_number in range(numerator, digit + 1):
        has_been_divided: list[int] = []
        now_divide = numerator
        for _ in range(1, digit + 1):
            if now_divide in has_been_divided:
                if longest_list_length < len(has_been_divided):
                    longest_list_length = len(has_been_divided)
                    the_digit = divide_by_number
            else:
                has_been_divided.append(now_divide)
                now_divide = now_divide * 10 % divide_by_number

    return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
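    # Hedged sanity check (well-known small case of Project Euler #26): below 10,
    # d = 7 has the longest recurring cycle in 1/d. The full solution() run is
    # expected to return 983 but is noticeably slower, so it is not asserted here.
    assert solution(1, 10) == 7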
| 13 | 0 |
'''simple docstring'''
import importlib.util
import json
import os
import warnings
from dataclasses import dataclass, field
import torch
from ..training_args import TrainingArguments
from ..utils import cached_property, is_sagemaker_dp_enabled, logging
a = logging.get_logger(__name__)
def is_sagemaker_model_parallel_available():
    # Get the sagemaker specific mp parameters from smp_options variable.
    smp_options = os.getenv("SM_HP_MP_PARAMETERS", "{}")
    try:
        # Parse it and check the field "partitions" is included, it is required for model parallel.
        smp_options = json.loads(smp_options)
        if "partitions" not in smp_options:
            return False
    except json.JSONDecodeError:
        return False

    # Get the sagemaker specific framework parameters from mpi_options variable.
    mpi_options = os.getenv("SM_FRAMEWORK_PARAMS", "{}")
    try:
        # Parse it and check the field "sagemaker_distributed_dataparallel_enabled".
        mpi_options = json.loads(mpi_options)
        if not mpi_options.get("sagemaker_mpi_enabled", False):
            return False
    except json.JSONDecodeError:
        return False
    # Lastly, check if the `smdistributed` module is present.
    return importlib.util.find_spec("smdistributed") is not None
if is_sagemaker_model_parallel_available():
import smdistributed.modelparallel.torch as smp
smp.init()
@dataclass
class SageMakerTrainingArguments(TrainingArguments):
    mp_parameters: str = field(
        default="",
        metadata={"help": "Used by the SageMaker launcher to send mp-specific args. Ignored in SageMakerTrainer"},
    )

    def __post_init__(self):
        super().__post_init__()
        warnings.warn(
            "`SageMakerTrainingArguments` is deprecated and will be removed in v5 of Transformers. You can use "
            "`TrainingArguments` instead.",
            FutureWarning,
        )
    @cached_property
    def _setup_devices(self) -> "torch.device":
        logger.info("PyTorch: setting up devices")
        if torch.distributed.is_available() and torch.distributed.is_initialized() and self.local_rank == -1:
            logger.warning(
                "torch.distributed process group is initialized, but local_rank == -1. "
                "In order to use Torch DDP, launch your script with `python -m torch.distributed.launch"
            )
        if self.no_cuda:
            device = torch.device("cpu")
            self._n_gpu = 0
        elif is_sagemaker_model_parallel_available():
            local_rank = smp.local_rank()
            device = torch.device("cuda", local_rank)
            self._n_gpu = 1
        elif is_sagemaker_dp_enabled():
            import smdistributed.dataparallel.torch.torch_smddp  # noqa: F401

            torch.distributed.init_process_group(backend="smddp", timeout=self.ddp_timeout_delta)
            self.local_rank = int(os.getenv("SMDATAPARALLEL_LOCAL_RANK"))
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1
        elif self.local_rank == -1:
            # if n_gpu is > 1 we'll use nn.DataParallel.
            # If you only want to use a specific subset of GPUs use `CUDA_VISIBLE_DEVICES=0`
            # Explicitly set CUDA to the first (index 0) CUDA device, otherwise `set_device` will
            # trigger an error that a device index is missing. Index 0 takes into account the
            # GPUs available in the environment, so `CUDA_VISIBLE_DEVICES=1,2` with `cuda:0`
            # will use the first GPU in that env, i.e. GPU#1
            device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
            # Sometimes the line in the postinit has not been run before we end up here, so just checking we're not at
            # the default value.
            self._n_gpu = torch.cuda.device_count()
        else:
            # Here, we'll use torch.distributed.
            # Initializes the distributed backend which will take care of synchronizing nodes/GPUs
            if not torch.distributed.is_initialized():
                torch.distributed.init_process_group(backend="nccl", timeout=self.ddp_timeout_delta)
            device = torch.device("cuda", self.local_rank)
            self._n_gpu = 1

        if device.type == "cuda":
            torch.cuda.set_device(device)

        return device
    @property
    def world_size(self):
        if is_sagemaker_model_parallel_available():
            return smp.dp_size()

        return super().world_size

    @property
    def place_model_on_device(self):
        return not is_sagemaker_model_parallel_available()

    @property
    def _no_sync_in_gradient_accumulation(self):
        return False
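

# Hedged demo of the detection helper above: both env vars must parse as JSON
# with the right fields, and `smdistributed` must be importable, before model
# parallelism is reported as available.
if __name__ == "__main__":
    os.environ["SM_HP_MP_PARAMETERS"] = json.dumps({"partitions": 2})
    os.environ["SM_FRAMEWORK_PARAMS"] = json.dumps({"sagemaker_mpi_enabled": True})
    print(is_sagemaker_model_parallel_available())  # False unless smdistributed is installed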
| 718 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class ChineseCLIPImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_center_crop=True, crop_size=None, do_normalize=True, image_mean=[0.48145466, 0.4578275, 0.40821073], image_std=[0.26862954, 0.26130258, 0.27577711], do_convert_rgb=True):
        size = size if size is not None else {"height": 224, "width": 224}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_convert_rgb = do_convert_rgb
    def prepare_image_processor_dict(self):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
    def prepare_inputs(self, equal_resolution=False, numpify=False, torchify=False):
        """Prepare a list of PIL images, or numpy arrays if numpify=True, or PyTorch tensors if torchify=True."""
        assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"

        if equal_resolution:
            image_inputs = []
            for i in range(self.batch_size):
                image_inputs.append(
                    np.random.randint(
                        255, size=(self.num_channels, self.max_resolution, self.max_resolution), dtype=np.uint8
                    )
                )
        else:
            image_inputs = []
            for i in range(self.batch_size):
                width, height = np.random.choice(np.arange(self.min_resolution, self.max_resolution), 2)
                image_inputs.append(np.random.randint(255, size=(self.num_channels, width, height), dtype=np.uint8))

        if not numpify and not torchify:
            # PIL expects the channel dimension as last dimension
            image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]

        if torchify:
            image_inputs = [torch.from_numpy(x) for x in image_inputs]

        return image_inputs
@require_torch
@require_vision
class ChineseCLIPImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, do_center_crop=True)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"height": 224, "width": 224})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
@require_torch
@require_vision
class ChineseCLIPImageProcessingTestFourChannels(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ChineseCLIPImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ChineseCLIPImageProcessingTester(self, num_channels=4, do_center_crop=True)
        self.expected_encoded_image_num_channels = 3

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_convert_rgb"))
    def test_batch_feature(self):
        pass
    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = self.image_processor_tester.prepare_inputs(equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input (4 input channels collapse to 3 via do_convert_rgb)
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.expected_encoded_image_num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
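

# Hedged usage sketch outside the test classes: an RGBA input collapses to 3
# channels when do_convert_rgb=True. Size/crop values mirror the tester defaults.
if __name__ == "__main__":
    image_processing = ChineseCLIPImageProcessor(
        size={"height": 224, "width": 224}, crop_size={"height": 18, "width": 18}, do_convert_rgb=True
    )
    image = Image.new("RGBA", (40, 60))
    pixel_values = image_processing(image, return_tensors="pt").pixel_values
    print(pixel_values.shape)  # torch.Size([1, 3, 18, 18])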
| 13 | 0 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class LayoutLMv3Processor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "LayoutLMv3ImageProcessor"
    tokenizer_class = ("LayoutLMv3Tokenizer", "LayoutLMv3TokenizerFast")

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
    def __call__(self, images, text=None, text_pair=None, boxes=None, word_labels=None, add_special_tokens=True, padding=False, truncation=None, max_length=None, stride=0, pad_to_multiple_of=None, return_token_type_ids=None, return_attention_mask=None, return_overflowing_tokens=False, return_special_tokens_mask=False, return_offsets_mapping=False, return_length=False, verbose=True, return_tensors=None, **kwargs) -> BatchEncoding:
        # verify input
        if self.image_processor.apply_ocr and (boxes is not None):
            raise ValueError(
                "You cannot provide bounding boxes if you initialized the image processor with apply_ocr set to True."
            )

        if self.image_processor.apply_ocr and (word_labels is not None):
            raise ValueError(
                "You cannot provide word labels if you initialized the image processor with apply_ocr set to True."
            )

        # first, apply the image processor
        features = self.image_processor(images=images, return_tensors=return_tensors)

        # second, apply the tokenizer
        if text is not None and self.image_processor.apply_ocr and text_pair is None:
            if isinstance(text, str):
                text = [text]  # add batch dimension (as the image processor always adds a batch dimension)
            text_pair = features["words"]

        encoded_inputs = self.tokenizer(text=text if text is not None else features["words"], text_pair=text_pair if text_pair is not None else None, boxes=boxes if boxes is not None else features["boxes"], word_labels=word_labels, add_special_tokens=add_special_tokens, padding=padding, truncation=truncation, max_length=max_length, stride=stride, pad_to_multiple_of=pad_to_multiple_of, return_token_type_ids=return_token_type_ids, return_attention_mask=return_attention_mask, return_overflowing_tokens=return_overflowing_tokens, return_special_tokens_mask=return_special_tokens_mask, return_offsets_mapping=return_offsets_mapping, return_length=return_length, verbose=verbose, return_tensors=return_tensors, **kwargs)

        # add pixel values
        pixel_values = features.pop("pixel_values")
        if return_overflowing_tokens is True:
            pixel_values = self.get_overflowing_images(pixel_values, encoded_inputs["overflow_to_sample_mapping"])
        encoded_inputs["pixel_values"] = pixel_values

        return encoded_inputs
    def get_overflowing_images(self, images, overflow_to_sample_mapping):
        # in case there's an overflow, ensure each `input_ids` sample is mapped to its corresponding image
        images_with_overflow = []
        for sample_idx in overflow_to_sample_mapping:
            images_with_overflow.append(images[sample_idx])

        if len(images_with_overflow) != len(overflow_to_sample_mapping):
            raise ValueError(
                "Expected length of images to be the same as the length of `overflow_to_sample_mapping`, but got"
                f" {len(images_with_overflow)} and {len(overflow_to_sample_mapping)}"
            )

        return images_with_overflow
    def batch_decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's `batch_decode`
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        # forwards all arguments to the tokenizer's `decode`
        return self.tokenizer.decode(*args, **kwargs)
    @property
    def model_input_names(self):
        return ["input_ids", "bbox", "attention_mask", "pixel_values"]

    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
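

# Hedged usage sketch: OCR disabled, words and boxes supplied by hand. The
# tokenizer checkpoint is the standard public one; the boxes are toy values in
# the 0-1000 coordinate system LayoutLM uses.
if __name__ == "__main__":
    from PIL import Image

    from transformers import LayoutLMv3ImageProcessor, LayoutLMv3TokenizerFast

    processor = LayoutLMv3Processor(
        image_processor=LayoutLMv3ImageProcessor(apply_ocr=False),
        tokenizer=LayoutLMv3TokenizerFast.from_pretrained("microsoft/layoutlmv3-base"),
    )
    image = Image.new("RGB", (224, 224))
    encoding = processor(image, ["hello", "world"], boxes=[[1, 2, 3, 4], [5, 6, 7, 8]], return_tensors="pt")
    print(sorted(encoding.keys()))  # e.g. ['attention_mask', 'bbox', 'input_ids', 'pixel_values']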
| 719 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        _ = func(*args, **kwargs)
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__

    return wrapper
def generate_examples(features: dict, num_examples=100, seq_shapes=None):
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data
def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)

        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}."
        )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))

    return dataset
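

# Hedged usage sketch: timing dataset generation with the decorator above.
# Feature spec and example count are arbitrary illustration values.
if __name__ == "__main__":
    import tempfile

    bench_features = datasets.Features({"text": datasets.Value("string"), "score": datasets.Value("float32")})
    with tempfile.TemporaryDirectory() as tmp_dir:
        timed_generate = get_duration(generate_example_dataset)
        seconds = timed_generate(tmp_dir + "/bench.arrow", bench_features, num_examples=50)
        print(f"wrote 50 examples in {seconds:.4f}s")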
| 13 | 0 |
'''simple docstring'''
def solution(pence: int = 200) -> int:
    """Count the combinations of standard UK coins that sum to `pence`,
    using a bottom-up coin-change dynamic programme."""
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    number_of_ways = [0] * (pence + 1)
    number_of_ways[0] = 1  # base case: one way to make 0 pence

    for coin in coins:
        for i in range(coin, pence + 1, 1):
            number_of_ways[i] += number_of_ways[i - coin]
    return number_of_ways[pence]
if __name__ == "__main__":
assert solution(200) == 73682
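    # Hedged extra check: with coins {1, 2, 5} there are exactly 4 ways to make
    # 5 pence (5, 2+2+1, 2+1+1+1, and five 1s) -- a small case to trace the DP.
    assert solution(5) == 4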
| 720 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
CI_HUB_USER = "__DUMMY_TRANSFORMERS_USER__"
CI_HUB_USER_FULL_NAME = "Dummy User"
CI_HUB_USER_TOKEN = "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"

CI_HUB_ENDPOINT = "https://hub-ci.huggingface.co"
CI_HUB_DATASETS_URL = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
CI_HUB_TOKEN_PATH = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def ci_hfh_hf_hub_url(monkeypatch):
    monkeypatch.setattr(
        "huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE", CI_HFH_HUGGINGFACE_CO_URL_TEMPLATE
    )


@pytest.fixture
def ci_hub_config(monkeypatch):
    monkeypatch.setattr("datasets.config.HF_ENDPOINT", CI_HUB_ENDPOINT)
    monkeypatch.setattr("datasets.config.HUB_DATASETS_URL", CI_HUB_DATASETS_URL)


@pytest.fixture
def ci_hub_token_path(monkeypatch):
    monkeypatch.setattr("huggingface_hub.hf_api.HfFolder.path_token", CI_HUB_TOKEN_PATH)
@pytest.fixture
def set_ci_hub_access_token(ci_hub_config, ci_hub_token_path):
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield
    HfFolder.delete_token()
@pytest.fixture(scope="session")
def hf_api():
    return HfApi(endpoint=CI_HUB_ENDPOINT)


@pytest.fixture(scope="session")
def hf_token(hf_api: HfApi):
    previous_token = HfFolder.get_token()
    HfFolder.save_token(CI_HUB_USER_TOKEN)
    yield CI_HUB_USER_TOKEN
    if previous_token is not None:
        HfFolder.save_token(previous_token)
@pytest.fixture
def cleanup_repo(hf_api):
    def _cleanup_repo(repo_id):
        hf_api.delete_repo(repo_id, token=CI_HUB_USER_TOKEN, repo_type="dataset")

    return _cleanup_repo


@pytest.fixture
def temporary_repo(cleanup_repo):
    @contextmanager
    def _temporary_repo(repo_id):
        try:
            yield repo_id
        finally:
            cleanup_repo(repo_id)

    return _temporary_repo
@pytest.fixture(scope="session")
def hf_private_dataset_repo_txt_data_(hf_api: HfApi, hf_token, text_file):
    repo_name = f"repo_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(text_file),
        path_in_repo="data/text_data.txt",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_txt_data(hf_private_dataset_repo_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_txt_data_(hf_api: HfApi, hf_token, zip_csv_with_dir_path):
    repo_name = f"repo_zipped_txt_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_csv_with_dir_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_txt_data(hf_private_dataset_repo_zipped_txt_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="session")
def hf_private_dataset_repo_zipped_img_data_(hf_api: HfApi, hf_token, zip_image_path):
    repo_name = f"repo_zipped_img_data-{int(time.time() * 10e3)}"
    repo_id = f"{CI_HUB_USER}/{repo_name}"
    hf_api.create_repo(repo_id, token=hf_token, repo_type="dataset", private=True)
    hf_api.upload_file(
        token=hf_token,
        path_or_fileobj=str(zip_image_path),
        path_in_repo="data.zip",
        repo_id=repo_id,
        repo_type="dataset",
    )
    yield repo_id
    try:
        hf_api.delete_repo(repo_id, token=hf_token, repo_type="dataset")
    except (requests.exceptions.HTTPError, ValueError):  # catch http error and token invalid error
        pass


@pytest.fixture()
def hf_private_dataset_repo_zipped_img_data(hf_private_dataset_repo_zipped_img_data_, ci_hub_config, ci_hfh_hf_hub_url):
    return hf_private_dataset_repo_zipped_img_data_
| 13 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class FocalNetModelTester:
    def __init__(self, parent, batch_size=13, image_size=32, patch_size=2, num_channels=3, embed_dim=16, hidden_sizes=[32, 64, 128], depths=[1, 2, 1], num_heads=[2, 2, 4], window_size=2, mlp_ratio=2.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, patch_norm=True, initializer_range=0.02, layer_norm_eps=1e-5, is_training=True, scope=None, use_labels=True, type_sequence_label_size=10, encoder_stride=8, out_features=["stage1", "stage2"], out_indices=[1, 2]):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.patch_norm = patch_norm
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.is_training = is_training
        self.scope = scope
        self.use_labels = use_labels
        self.type_sequence_label_size = type_sequence_label_size
        self.encoder_stride = encoder_stride
        self.out_features = out_features
        self.out_indices = out_indices
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
    def create_and_check_model(self, config, pixel_values, labels):
        model = FocalNetModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        expected_seq_len = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths) - 1))
        expected_dim = int(config.embed_dim * 2 ** (len(config.depths) - 1))

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, expected_seq_len, expected_dim))
    def create_and_check_backbone(self, config, pixel_values, labels):
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), len(config.out_features))
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size, 8, 8])

        # verify channels
        self.parent.assertEqual(len(model.channels), len(config.out_features))
        self.parent.assertListEqual(model.channels, config.hidden_sizes[:-1])

        # verify backbone works with out_features=None
        config.out_features = None
        model = FocalNetBackbone(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)

        # verify feature maps
        self.parent.assertEqual(len(result.feature_maps), 1)
        self.parent.assertListEqual(list(result.feature_maps[0].shape), [self.batch_size, self.image_size * 2, 4, 4])

        # verify channels
        self.parent.assertEqual(len(model.channels), 1)
        self.parent.assertListEqual(model.channels, [config.hidden_sizes[-1]])
    def create_and_check_for_masked_image_modeling(self, config, pixel_values, labels):
        model = FocalNetForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.reconstruction.shape, (self.batch_size, self.num_channels, self.image_size, self.image_size)
        )

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForMaskedImageModeling(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.reconstruction.shape, (self.batch_size, 1, self.image_size, self.image_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.type_sequence_label_size
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = FocalNetForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class FocalNetModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FocalNetModel,
            FocalNetForImageClassification,
            FocalNetForMaskedImageModeling,
            FocalNetBackbone,
        )
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {"feature-extraction": FocalNetModel, "image-classification": FocalNetForImageClassification}
        if is_torch_available()
        else {}
    )
    fx_compatible = False

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FocalNetConfig, embed_dim=37, has_text_modality=False)
    def test_config(self):
        self.create_and_test_config_common_properties()
        self.config_tester.create_and_test_config_to_json_string()
        self.config_tester.create_and_test_config_to_json_file()
        self.config_tester.create_and_test_config_from_and_save_pretrained()
        self.config_tester.create_and_test_config_with_num_labels()
        self.config_tester.check_config_can_be_init_without_params()
        self.config_tester.check_config_arguments_init()

    def create_and_test_config_common_properties(self):
        return
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_backbone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_backbone(*config_and_inputs)

    def test_for_masked_image_modeling(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_image_modeling(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
    @unittest.skip(reason="FocalNet does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    @unittest.skip(reason="FocalNet does not use feedforward chunking")
    def test_feed_forward_chunking(self):
        pass
    def test_model_common_attributes(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes[:-1]:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def check_hidden_states_output(self, inputs_dict, config, model_class, image_size):
        model = model_class(config)
        model.to(torch_device)
        model.eval()

        with torch.no_grad():
            outputs = model(**self._prepare_for_class(inputs_dict, model_class))

        hidden_states = outputs.hidden_states

        expected_num_layers = getattr(self.model_tester, "expected_num_hidden_layers", len(self.model_tester.depths) + 1)
        self.assertEqual(len(hidden_states), expected_num_layers)

        # FocalNet has a different seq_length
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        num_patches = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])

        self.assertListEqual(
            list(hidden_states[0].shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )

        reshaped_hidden_states = outputs.reshaped_hidden_states
        self.assertEqual(len(reshaped_hidden_states), expected_num_layers)

        batch_size, num_channels, height, width = reshaped_hidden_states[0].shape
        reshaped_hidden_states = (
            reshaped_hidden_states[0].view(batch_size, num_channels, height * width).permute(0, 2, 1)
        )
        self.assertListEqual(
            list(reshaped_hidden_states.shape[-2:]),
            [num_patches, self.model_tester.embed_dim],
        )
    def test_hidden_states_output(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, image_size)
    def test_hidden_states_output_with_padding(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.patch_size = 3

        image_size = (
            self.model_tester.image_size
            if isinstance(self.model_tester.image_size, collections.abc.Iterable)
            else (self.model_tester.image_size, self.model_tester.image_size)
        )
        patch_size = (
            config.patch_size
            if isinstance(config.patch_size, collections.abc.Iterable)
            else (config.patch_size, config.patch_size)
        )

        padded_height = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
        padded_width = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])

        for model_class in self.all_model_classes[:-1]:
            inputs_dict["output_hidden_states"] = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            self.check_hidden_states_output(inputs_dict, config, model_class, (padded_height, padded_width))
@slow
    def test_model_from_pretrained(self):
        for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = FocalNetModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if "embeddings" not in name and param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@require_vision
@require_torch
class FocalNetModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny").to(torch_device)
        image_processor = self.default_image_processor

        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.2166, -0.4368, 0.2191]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
        self.assertEqual(outputs.logits.argmax(dim=-1).item(), 281)
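
# Standalone inference sketch mirroring the integration test above (not run at
# import time; needs network access to download the checkpoint; the helper name
# is illustrative only).
def _focalnet_inference_demo():
    processor = AutoImageProcessor.from_pretrained("microsoft/focalnet-tiny")
    model = FocalNetForImageClassification.from_pretrained("microsoft/focalnet-tiny")
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    inputs = processor(images=image, return_tensors="pt")
    # ImageNet class 281 is "tabby, tabby cat", matching the assertion above
    return model(**inputs).logits.argmax(dim=-1).item()
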
@require_torch
class FocalNetBackboneTest(BackboneTesterMixin, unittest.TestCase):
    all_model_classes = (FocalNetBackbone,) if is_torch_available() else ()
    config_class = FocalNetConfig

    has_attentions = False

    def setUp(self):
        self.model_tester = FocalNetModelTester(self)
| 721 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
DETA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class DetaConfig(PretrainedConfig):
    model_type = "deta"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
    }

    def __init__(
        self,
        backbone_config=None,
        num_queries=900,
        max_position_embeddings=2048,
        encoder_layers=6,
        encoder_ffn_dim=2048,
        encoder_attention_heads=8,
        decoder_layers=6,
        decoder_ffn_dim=1024,
        decoder_attention_heads=8,
        encoder_layerdrop=0.0,
        is_encoder_decoder=True,
        activation_function="relu",
        d_model=256,
        dropout=0.1,
        attention_dropout=0.0,
        activation_dropout=0.0,
        init_std=0.02,
        init_xavier_std=1.0,
        return_intermediate=True,
        auxiliary_loss=False,
        position_embedding_type="sine",
        num_feature_levels=5,
        encoder_n_points=4,
        decoder_n_points=4,
        two_stage=True,
        two_stage_num_proposals=300,
        with_box_refine=True,
        assign_first_stage=True,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        mask_loss_coefficient=1,
        dice_loss_coefficient=1,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        focal_alpha=0.25,
        **kwargs,
    ):
'''simple docstring'''
        if backbone_config is None:
            logger.info("`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.")
            backbone_config = CONFIG_MAPPING["resnet"](out_features=["stage2", "stage3", "stage4"])
        else:
            if isinstance(backbone_config, dict):
                backbone_model_type = backbone_config.pop("model_type")
                config_class = CONFIG_MAPPING[backbone_model_type]
                backbone_config = config_class.from_dict(backbone_config)

        self.backbone_config = backbone_config
        self.num_queries = num_queries
        self.max_position_embeddings = max_position_embeddings
        self.d_model = d_model
        self.encoder_ffn_dim = encoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.init_xavier_std = init_xavier_std
        self.encoder_layerdrop = encoder_layerdrop
        self.auxiliary_loss = auxiliary_loss
        self.position_embedding_type = position_embedding_type
        # deformable attributes
        self.num_feature_levels = num_feature_levels
        self.encoder_n_points = encoder_n_points
        self.decoder_n_points = decoder_n_points
        self.two_stage = two_stage
        self.two_stage_num_proposals = two_stage_num_proposals
        self.with_box_refine = with_box_refine
        self.assign_first_stage = assign_first_stage
        if two_stage is True and with_box_refine is False:
            raise ValueError("If two_stage is True, with_box_refine must be True.")
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.mask_loss_coefficient = mask_loss_coefficient
        self.dice_loss_coefficient = dice_loss_coefficient
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
        self.focal_alpha = focal_alpha
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def num_attention_heads(self) -> int:
        return self.encoder_attention_heads

    @property
    def hidden_size(self) -> int:
        return self.d_model

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["backbone_config"] = self.backbone_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
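
# Minimal usage sketch for the config defined above (the demo function name is
# illustrative; CONFIG_MAPPING["resnet"] supplies the default backbone):
def _deta_config_demo():
    config = DetaConfig()  # a default ResNet backbone config is created
    assert config.model_type == "deta"
    assert config.num_attention_heads == config.encoder_attention_heads  # via attribute_map
    return config.to_dict()  # backbone_config is serialized recursively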
| 13 | 0 |
def catalan(number: int) -> int:
    """
    Return the `number`-th Catalan number (1-indexed): 1, 1, 2, 5, 14, 42, ...
    computed iteratively from the recurrence C(n) = C(n-1) * (4n - 2) / (n + 1).

    >>> catalan(1)
    1
    >>> catalan(5)
    14
    """
    if not isinstance(number, int):
        msg = f"Input value of [number={number}] must be an integer"
        raise TypeError(msg)

    if number < 1:
        msg = f"Input value of [number={number}] must be > 0"
        raise ValueError(msg)

    current_number = 1

    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1

    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 700 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxDownsample2D(nn.Module):
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        hidden_states = self.conv(hidden_states)
        return hidden_states
class FlaxResnetBlock2D(nn.Module):
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)

        return hidden_states + residual
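
# Minimal usage sketch for the upsampling block above: initialize params with a
# dummy NHWC input and check the 2x nearest-neighbor spatial growth (the demo
# function name is illustrative).
def _upsample_demo():
    upsample = FlaxUpsample2D(out_channels=4)
    x = jnp.ones((1, 8, 8, 4))  # (batch, height, width, channels)
    params = upsample.init(jax.random.PRNGKey(0), x)
    out = upsample.apply(params, x)
    assert out.shape == (1, 16, 16, 4)
    return out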
| 13 | 0 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNet2DConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class StableDiffusionUpscalePipelineFastTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    @property
    def dummy_image(self):
        batch_size = 1
        num_channels = 3
        sizes = (32, 32)

        image = floats_tensor((batch_size, num_channels) + sizes, rng=random.Random(0)).to(torch_device)
        return image
    @property
    def dummy_cond_unet_upscale(self):
        torch.manual_seed(0)
        model = UNet2DConditionModel(
            block_out_channels=(32, 32, 64),
            layers_per_block=2,
            sample_size=32,
            in_channels=7,
            out_channels=4,
            down_block_types=("DownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D"),
            up_block_types=("CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "UpBlock2D"),
            cross_attention_dim=32,
            attention_head_dim=8,
            use_linear_projection=True,
            only_cross_attention=(True, True, False),
            num_class_embeds=100,
        )
        return model
    @property
    def dummy_vae(self):
        torch.manual_seed(0)
        model = AutoencoderKL(
            block_out_channels=[32, 32, 64],
            in_channels=3,
            out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
            latent_channels=4,
        )
        return model
    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0,
            eos_token_id=2,
            hidden_size=32,
            intermediate_size=37,
            layer_norm_eps=1e-05,
            num_attention_heads=4,
            num_hidden_layers=5,
            pad_token_id=1,
            vocab_size=1000,
            hidden_act="gelu",
            projection_dim=512,
        )
        return CLIPTextModel(config)
    def test_stable_diffusion_upscale(self):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
__SCREAMING_SNAKE_CASE = DDPMScheduler()
__SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = self.dummy_vae
__SCREAMING_SNAKE_CASE = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
        __SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCamelCase ,low_res_scheduler=lowerCamelCase ,scheduler=lowerCamelCase ,vae=lowerCamelCase ,text_encoder=lowerCamelCase ,tokenizer=lowerCamelCase ,max_noise_level=350 ,)
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] ,image=lowerCamelCase ,generator=lowerCamelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] ,image=lowerCamelCase ,generator=lowerCamelCase ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=lowerCamelCase ,)[0]
__SCREAMING_SNAKE_CASE = image[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = image_from_tuple[0, -3:, -3:, -1]
__SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
__SCREAMING_SNAKE_CASE = np.array([0.3_113, 0.3_910, 0.4_272, 0.4_859, 0.5_061, 0.4_652, 0.5_362, 0.5_715, 0.5_661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
    def test_stable_diffusion_upscale_batch(self):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """cpu""" # ensure determinism for the device-dependent torch.Generator
__SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
__SCREAMING_SNAKE_CASE = DDPMScheduler()
__SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = self.dummy_vae
__SCREAMING_SNAKE_CASE = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
        __SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCamelCase ,low_res_scheduler=lowerCamelCase ,scheduler=lowerCamelCase ,vae=lowerCamelCase ,text_encoder=lowerCamelCase ,tokenizer=lowerCamelCase ,max_noise_level=350 ,)
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
__SCREAMING_SNAKE_CASE = torch.Generator(device=lowerCamelCase ).manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] ,image=lowerCamelCase ,generator=lowerCamelCase ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
    def test_stable_diffusion_upscale_fp16(self):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.dummy_cond_unet_upscale
__SCREAMING_SNAKE_CASE = DDPMScheduler()
__SCREAMING_SNAKE_CASE = DDIMScheduler(prediction_type="""v_prediction""" )
__SCREAMING_SNAKE_CASE = self.dummy_vae
__SCREAMING_SNAKE_CASE = self.dummy_text_encoder
__SCREAMING_SNAKE_CASE = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
__SCREAMING_SNAKE_CASE = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
        __SCREAMING_SNAKE_CASE = Image.fromarray(np.uint8(lowerCamelCase ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
__SCREAMING_SNAKE_CASE = unet.half()
__SCREAMING_SNAKE_CASE = text_encoder.half()
# make sure here that pndm scheduler skips prk
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline(
unet=lowerCamelCase ,low_res_scheduler=lowerCamelCase ,scheduler=lowerCamelCase ,vae=lowerCamelCase ,text_encoder=lowerCamelCase ,tokenizer=lowerCamelCase ,max_noise_level=350 ,)
__SCREAMING_SNAKE_CASE = sd_pipe.to(lowerCamelCase )
sd_pipe.set_progress_bar_config(disable=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """A painting of a squirrel eating a burger"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = sd_pipe(
[prompt] ,image=lowerCamelCase ,generator=lowerCamelCase ,num_inference_steps=2 ,output_type="""np""" ,).images
__SCREAMING_SNAKE_CASE = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class StableDiffusionUpscalePipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
    def test_stable_diffusion_upscale_pipeline(self):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
__SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-x4-upscaler"""
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(lowerCamelCase )
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = """a cat sitting on a park bench"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase ,image=lowerCamelCase ,generator=lowerCamelCase ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1E-3
    def test_stable_diffusion_upscale_pipeline_fp16(self):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
__SCREAMING_SNAKE_CASE = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-x4-upscaler"""
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
            lowerCamelCase ,torch_dtype=torch.float16 ,)
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing()
__SCREAMING_SNAKE_CASE = """a cat sitting on a park bench"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase ,image=lowerCamelCase ,generator=lowerCamelCase ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5E-1
    def test_stable_diffusion_pipeline_with_sequential_cpu_offloading(self):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
__SCREAMING_SNAKE_CASE = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
__SCREAMING_SNAKE_CASE = """stabilityai/stable-diffusion-x4-upscaler"""
__SCREAMING_SNAKE_CASE = StableDiffusionUpscalePipeline.from_pretrained(
            lowerCamelCase ,torch_dtype=torch.float16 ,)
pipe.to(lowerCamelCase )
pipe.set_progress_bar_config(disable=lowerCamelCase )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
__SCREAMING_SNAKE_CASE = """a cat sitting on a park bench"""
__SCREAMING_SNAKE_CASE = torch.manual_seed(0 )
__SCREAMING_SNAKE_CASE = pipe(
prompt=lowerCamelCase ,image=lowerCamelCase ,generator=lowerCamelCase ,num_inference_steps=5 ,output_type="""np""" ,)
__SCREAMING_SNAKE_CASE = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
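
# End-to-end usage sketch for the pipeline exercised above (needs a CUDA GPU and
# network access; model id and image URL are taken from the slow tests; the demo
# function name is illustrative).
def _upscale_demo():
    pipe = StableDiffusionUpscalePipeline.from_pretrained(
        "stabilityai/stable-diffusion-x4-upscaler", torch_dtype=torch.float16
    ).to("cuda")
    low_res = load_image(
        "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
        "/sd2-upscale/low_res_cat.png"
    )
    # returns a PIL image upscaled 4x (512x512 for this 128x128 input)
    return pipe(prompt="a cat sitting on a park bench", image=low_res).images[0]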
| 701 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    # Update function if the value of any node in the min-heap decreases
    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prisms_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
    edges_number = int(input("Enter number of edges: ").strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
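
    # Worked example: entering the 3 edges "0 1 1", "1 2 2", "0 2 3" (a triangle
    # whose heaviest edge is 0-2) makes prisms_algorithm return [(0, 1), (1, 2)],
    # the minimum spanning tree of total weight 3.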
| 13 | 0 |
'''simple docstring'''
import argparse
import json
import os
import re
import torch
from transformers import BloomConfig, BloomModel
from transformers.file_utils import CONFIG_NAME, WEIGHTS_NAME
from transformers.utils import logging
logging.set_verbosity_info()
WEIGHTS_TO_AVERAGE_ENDSWITH = [
"word_embeddings_layernorm.weight",
"word_embeddings_layernorm.bias",
"input_layernorm.weight",
"input_layernorm.bias",
"post_attention_layernorm.weight",
"post_attention_layernorm.bias",
"self_attention.dense.bias",
"mlp.dense_4h_to_h.bias",
"ln_f.weight",
"ln_f.bias",
]
WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN = [
"mlp.dense_4h_to_h.weight",
"self_attention.dense.weight",
]
def layer_name_mapping(key, file):
    """Convert Megatron-DeepSpeed TP/PP weights mapping to transformers PP-only naming."""
    # Handle first and last layers
    layer_rename_map = {
        "word_embeddings.weight": "word_embeddings.weight",
        "word_embeddings.norm.weight": "word_embeddings_layernorm.weight",
        "word_embeddings.norm.bias": "word_embeddings_layernorm.bias",
        "weight": "ln_f.weight",
        "bias": "ln_f.bias",
    }

    if key in layer_rename_map:
        return layer_rename_map[key]

    # Handle transformer blocks: the layer number is read from the file name
    layer_number = int(re.match(r".*layer_(\d*).*", file)[1])
    layer_number -= 3
    return f"h.{layer_number}." + key
def get_dtype_size(dtype):
    if dtype == torch.bool:
        return 1 / 8
    bit_search = re.search(r"[^\d](\d+)$", str(dtype))
    if bit_search is None:
        raise ValueError(f"`dtype` is not a valid dtype: {dtype}.")
    bit_size = int(bit_search.groups()[0])
    return bit_size // 8
def convert_bloom_checkpoint_to_pytorch(
    bloom_checkpoint_path, bloom_config_file, pytorch_dump_folder_path, shard_model, pretraining_tp
):
'''simple docstring'''
if bloom_config_file == "":
__SCREAMING_SNAKE_CASE = BloomConfig()
else:
__SCREAMING_SNAKE_CASE = BloomConfig.from_json_file(__UpperCAmelCase )
if shard_model:
__SCREAMING_SNAKE_CASE = os.listdir(__UpperCAmelCase )
        __SCREAMING_SNAKE_CASE = sorted(filter(lambda s : s.startswith("""layer""" ) and "model_00" in s , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = {"""weight_map""": {}, """metadata""": {}}
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = BloomConfig()
for j, file in enumerate(__UpperCAmelCase ):
print("""Processing file: {}""".format(__UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = None
for i in range(__UpperCAmelCase ):
# load all TP files
__SCREAMING_SNAKE_CASE = file.replace("""model_00""" , f"""model_0{i}""" )
__SCREAMING_SNAKE_CASE = torch.load(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
__SCREAMING_SNAKE_CASE = list(temp.keys() )
for key in keys:
                    temp[layer_name_mapping(key ,file )] = temp.pop(key )
if tensors is None:
__SCREAMING_SNAKE_CASE = temp
else:
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__SCREAMING_SNAKE_CASE = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
__SCREAMING_SNAKE_CASE = torch.cat([tensors[key], temp[key]] , dim=__UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__SCREAMING_SNAKE_CASE = tensors[key] / pretraining_tp
torch.save(
__UpperCAmelCase , os.path.join(
__UpperCAmelCase , """pytorch_model_{}-of-{}.bin""".format(str(j + 1 ).zfill(5 ) , str(len(__UpperCAmelCase ) ).zfill(5 ) ) , ) , )
for key in tensors.keys():
__SCREAMING_SNAKE_CASE = tensors[key]
total_size += value.numel() * get_dtype_size(value.dtype )
if key not in index_dict["weight_map"]:
__SCREAMING_SNAKE_CASE = """pytorch_model_{}-of-{}.bin""".format(
str(j + 1 ).zfill(5 ) , str(len(__UpperCAmelCase ) ).zfill(5 ) )
__SCREAMING_SNAKE_CASE = BloomConfig()
__SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + """/""" + CONFIG_NAME
__SCREAMING_SNAKE_CASE = total_size
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
with open(os.path.join(__UpperCAmelCase , WEIGHTS_NAME + """.index.json""" ) , """w""" , encoding="""utf-8""" ) as f:
__SCREAMING_SNAKE_CASE = json.dumps(__UpperCAmelCase , indent=2 , sort_keys=__UpperCAmelCase ) + """\n"""
f.write(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = BloomModel(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = os.listdir(__UpperCAmelCase )
        __SCREAMING_SNAKE_CASE = sorted(filter(lambda s : s.startswith("""layer""" ) and "model_00" in s , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = None
for i, file in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = None
for i in range(__UpperCAmelCase ):
# load all TP files
__SCREAMING_SNAKE_CASE = file.replace("""model_00""" , f"""model_0{i}""" )
__SCREAMING_SNAKE_CASE = torch.load(os.path.join(__UpperCAmelCase , __UpperCAmelCase ) , map_location="""cpu""" )
# Rename keys in the transformers names
__SCREAMING_SNAKE_CASE = list(temp.keys() )
for key in keys:
                    temp[layer_name_mapping(key ,file )] = temp.pop(key )
if tensors is None:
__SCREAMING_SNAKE_CASE = temp
else:
for key in tensors.keys():
# We average (sum and then divide) some weights accross TP ranks (see https://github.com/bigscience-workshop/Megatron-DeepSpeed/blob/olruwase/sync_layer_norms/megatron/training.py#L425)
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
tensors[key] += temp[key]
else:
# Some weights are RowParallelLinear in Megatron-Deepspeed, others are ColumnParallel
__SCREAMING_SNAKE_CASE = 1 if any(text in key for text in WEIGHTS_WITH_ROW_PARALLELISM_CONTAIN ) else 0
# We concatenate these weights accross TP ranks
__SCREAMING_SNAKE_CASE = torch.cat([tensors[key], temp[key]] , dim=__UpperCAmelCase )
# Divide by the number of TP the weights we want to average
for key in tensors.keys():
if any(key.endswith(__UpperCAmelCase ) for end in WEIGHTS_TO_AVERAGE_ENDSWITH ):
__SCREAMING_SNAKE_CASE = tensors[key] / pretraining_tp
__SCREAMING_SNAKE_CASE = model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
assert not other_keys.unexpected_keys, f"""The keys {other_keys.unexpected_keys} are unexpected"""
if missing_keys is None:
__SCREAMING_SNAKE_CASE = set(other_keys.missing_keys )
else:
__SCREAMING_SNAKE_CASE = missing_keys.intersection(set(other_keys.missing_keys ) )
assert not missing_keys, f"""The keys {missing_keys} are missing"""
# Save pytorch-model
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + """/""" + WEIGHTS_NAME
__SCREAMING_SNAKE_CASE = pytorch_dump_folder_path + """/""" + CONFIG_NAME
print(f"""Save PyTorch model to {pytorch_weights_dump_path} with dtype {config.torch_dtype}""" )
if config.torch_dtype is not None:
__SCREAMING_SNAKE_CASE = model.to(config.torch_dtype )
torch.save(model.state_dict() , __UpperCAmelCase )
print(f"""Save configuration file to {pytorch_config_dump_path}""" )
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--bloom_checkpoint_path",
default=None,
type=str,
required=True,
help="Path to the Megatron-LM checkpoint path.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
parser.add_argument(
"--bloom_config_file",
default="",
type=str,
help=(
"An optional config json file corresponding to the pre-trained model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--shard_model",
action="store_true",
help="An optional setting to shard the output model \nThis enables sharding the converted checkpoint",
)
parser.add_argument(
"--pretraining_tp",
default=4,
type=int,
help="Pretraining TP rank that has been used when training the model in Megatron-LM \n",
)
    args = parser.parse_args()
convert_bloom_checkpoint_to_pytorch(
args.bloom_checkpoint_path,
args.bloom_config_file,
args.pytorch_dump_folder_path,
args.shard_model,
args.pretraining_tp,
)
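
    # Example invocation (paths are placeholders; the script name assumes the
    # file keeps its transformers name):
    #   python convert_bloom_original_checkpoint_to_pytorch.py \
    #       --bloom_checkpoint_path /path/to/megatron/checkpoint \
    #       --pytorch_dump_folder_path /path/to/output \
    #       --shard_model \
    #       --pretraining_tp 4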
| 702 |
'''simple docstring'''
import os
import string
import sys
a = 1 << 8
a = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a = KEYMAP["up"]
a = KEYMAP["left"]
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
a = ord(str(i))
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__UpperCAmelCase ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__UpperCAmelCase )
if ord(__UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] )
except KeyError:
__SCREAMING_SNAKE_CASE = cha[1]
else:
__SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase )
try:
tty.setraw(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase )
return ch
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__UpperCAmelCase ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 13 | 0 |
'''simple docstring'''
import numpy
# List of input, output pairs
train_data = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
test_data = (((515, 22, 13), 555), ((61, 35, 49), 150))
parameter_vector = [2, 4, 1, 5]
m = len(train_data)
LEARNING_RATE = 0.009
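
# The hypothesis is linear in the three input features:
#   h(x) = p0 + p1 * x1 + p2 * x2 + p3 * x3
# with parameter_vector = [p0, p1, p2, p3]; gradient descent below fits the
# parameters to `train_data` and is then checked against `test_data`.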
def _error(example_no, data_set="train"):
    return calculate_hypothesis_value(example_no, data_set) - output(
        example_no, data_set
    )


def _hypothesis_value(data_input_tuple):
    hyp_val = 0
    for i in range(len(parameter_vector) - 1):
        hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
    hyp_val += parameter_vector[0]
    return hyp_val


def output(example_no, data_set):
    if data_set == "train":
        return train_data[example_no][1]
    elif data_set == "test":
        return test_data[example_no][1]
    return None


def calculate_hypothesis_value(example_no, data_set):
    if data_set == "train":
        return _hypothesis_value(train_data[example_no][0])
    elif data_set == "test":
        return _hypothesis_value(test_data[example_no][0])
    return None


def summation_of_cost_derivative(index, end=m):
    summation_value = 0
    for i in range(end):
        if index == -1:
            summation_value += _error(i)
        else:
            summation_value += _error(i) * train_data[i][0][index]
    return summation_value


def get_cost_derivative(index):
    cost_derivative_value = summation_of_cost_derivative(index, m) / m
    return cost_derivative_value


def run_gradient_descent():
    global parameter_vector
    # Tune these values to set a tolerance value for predicted output
    absolute_error_limit = 0.000002
    relative_error_limit = 0
    j = 0
    while True:
        j += 1
        temp_parameter_vector = [0, 0, 0, 0]
        for i in range(0, len(parameter_vector)):
            cost_derivative = get_cost_derivative(i - 1)
            temp_parameter_vector[i] = (
                parameter_vector[i] - LEARNING_RATE * cost_derivative
            )
        if numpy.allclose(
            parameter_vector,
            temp_parameter_vector,
            atol=absolute_error_limit,
            rtol=relative_error_limit,
        ):
            break
        parameter_vector = temp_parameter_vector
    print(("Number of iterations:", j))


def test_gradient_descent():
    for i in range(len(test_data)):
        print(("Actual output value:", output(i, "test")))
        print(("Hypothesis output:", calculate_hypothesis_value(i, "test")))
if __name__ == "__main__":
run_gradient_descent()
print("\nTesting gradient descent for a linear hypothesis function.\n")
test_gradient_descent()
| 703 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection, item, lo=0, hi=-1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection, item, lo=0, hi=-1) -> int:
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection, item, lo=0, hi=-1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection, item, lo=0, hi=-1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection, item) -> int | None:
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection, item) -> int | None:
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection, item, left, right) -> int | None:
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)
if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
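
# Reference semantics on data = [0, 5, 7, 10, 15]:
#   bisect_left(data, 6)     -> 2  (first index where 6 can be inserted)
#   bisect_right(data, 5)    -> 2  (insertion point after the existing 5)
#   binary_search(data, 10)  -> 3, and binary_search(data, 6) -> None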
| 13 | 0 |
'''simple docstring'''
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {
"vocab_file": "vocab.json",
"merges_file": "merges.txt",
"tokenizer_config_file": "tokenizer_config.json",
}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json"
},
"merges_file": {
"facebook/blenderbot_small-90M": "https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt"
},
"tokenizer_config_file": {
"facebook/blenderbot_small-90M": (
"https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json"
)
},
}
a = {"facebook/blenderbot_small-90M": 512}
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs
class BlenderbotSmallTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        bos_token="__start__",
        eos_token="__end__",
        unk_token="__unk__",
        pad_token="__null__",
        **kwargs,
    ):
        super().__init__(unk_token=unk_token, bos_token=bos_token, eos_token=eos_token, pad_token=pad_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self) -> int:
        return len(self.encoder)

    def get_vocab(self) -> Dict:
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token: str) -> str:
        if token in self.cache:
            return self.cache[token]
        token = re.sub("([.,!?()])", r" \1", token)
        token = re.sub("(')", r" \1 ", token)
        token = re.sub(r"\s{2,}", " ", token)
        if "\n" in token:
            token = token.replace("\n", " __newln__")

        tokens = token.split(" ")
        words = []
        for token in tokens:
            if not len(token):
                continue

            token = token.lower()
            word = tuple(token)
            word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
            pairs = get_pairs(word)

            if not pairs:
                words.append(token)
                continue

            while True:
                bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
                if bigram not in self.bpe_ranks:
                    break
                first, second = bigram
                new_word = []
                i = 0

                while i < len(word):
                    try:
                        j = word.index(first, i)
                        new_word.extend(word[i:j])
                        i = j
                    except ValueError:
                        new_word.extend(word[i:])
                        break

                    if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                        new_word.append(first + second)
                        i += 2
                    else:
                        new_word.append(word[i])
                        i += 1
                new_word = tuple(new_word)
                word = new_word
                if len(word) == 1:
                    break
                else:
                    pairs = get_pairs(word)
            word = "@@ ".join(word)
            word = word[:-4]

            self.cache[token] = word
            words.append(word)
        return " ".join(words)
    def _tokenize(self, text: str) -> List[str]:
        split_tokens = []
        words = re.findall(r"\S+\n?", text)
        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens
    def _convert_token_to_id(self, token: str) -> int:
        token = token.lower()
        return self.encoder.get(token, self.encoder.get(self.unk_token))
    def _convert_id_to_token(self, index: int) -> str:
        return self.decoder.get(index, self.unk_token)
    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
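
# Minimal usage sketch for the tokenizer above (requires network access; the
# checkpoint id comes from PRETRAINED_VOCAB_FILES_MAP; the demo function name is
# illustrative):
def _tokenizer_demo():
    tok = BlenderbotSmallTokenizer.from_pretrained("facebook/blenderbot_small-90M")
    ids = tok("sample text")["input_ids"]
    return tok.decode(ids)  # round-trips through the BPE implemented above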
| 704 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class SchedulerType(Enum):
    LINEAR = "linear"
    COSINE = "cosine"
    COSINE_WITH_RESTARTS = "cosine_with_restarts"
    POLYNOMIAL = "polynomial"
    CONSTANT = "constant"
    CONSTANT_WITH_WARMUP = "constant_with_warmup"
    PIECEWISE_CONSTANT = "piecewise_constant"
def get_constant_schedule(optimizer, last_epoch=-1):
    return LambdaLR(optimizer, lambda _: 1, last_epoch=last_epoch)
def get_constant_schedule_with_warmup(optimizer, num_warmup_steps, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1.0, num_warmup_steps))
        return 1.0

    return LambdaLR(optimizer, lr_lambda, last_epoch=last_epoch)
def get_piecewise_constant_schedule(optimizer, step_rules, last_epoch=-1):
    rules_dict = {}
    rule_list = step_rules.split(",")
    for rule_str in rule_list[:-1]:
        value_str, value = rule_str.split(":")
        steps = int(value_str)
        value = float(value)
        rules_dict[steps] = value
    last_lr_multiple = float(rule_list[-1])

    def create_rules_function(rules_dict, last_lr_multiple):
        def rule_func(steps) -> float:
            sorted_steps = sorted(rules_dict.keys())
            for i, sorted_step in enumerate(sorted_steps):
                if steps < sorted_step:
                    return rules_dict[sorted_steps[i]]
            return last_lr_multiple

        return rule_func

    rules_func = create_rules_function(rules_dict, last_lr_multiple)

    return LambdaLR(optimizer, rules_func, last_epoch=last_epoch)
def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        return max(
            0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps))
        )

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress)))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def get_cosine_with_hard_restarts_schedule_with_warmup(
    optimizer, num_warmup_steps, num_training_steps, num_cycles=1, last_epoch=-1
):
    def lr_lambda(current_step):
        if current_step < num_warmup_steps:
            return float(current_step) / float(max(1, num_warmup_steps))
        progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps))
        if progress >= 1.0:
            return 0.0
        return max(0.0, 0.5 * (1.0 + math.cos(math.pi * ((float(num_cycles) * progress) % 1.0))))

    return LambdaLR(optimizer, lr_lambda, last_epoch)
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
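# Usage sketch (illustrative, not part of the original module): how these
# factories are typically driven from a training loop. The tiny model,
# learning rate, and step counts below are placeholder values.
import torch

_model = torch.nn.Linear(4, 4)
_optimizer = torch.optim.AdamW(_model.parameters(), lr=1e-4)
_lr_scheduler = get_scheduler("cosine", _optimizer, num_warmup_steps=10, num_training_steps=100)
for _ in range(100):
    _optimizer.step()       # update weights first
    _lr_scheduler.step()    # then advance the learning-rate schedule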
| 13 | 0 |
'''simple docstring'''
import math
from collections import defaultdict
from typing import List, Optional, Tuple, Union
import numpy as np
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin, SchedulerOutput
def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999, alpha_transform_type="cosine"):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which
    defines the cumulative product of (1 - beta) over time from t = [0, 1].
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)
class HeunDiscreteScheduler(SchedulerMixin, ConfigMixin):
    """
    Scheduler implementing Algorithm 2 (Heun steps) from Karras et al. (2022):
    a second-order sampler that alternates an Euler predictor with a Heun
    corrector, hence `order = 2`.
    """

    _compatibles = [e.name for e in KarrasDiffusionSchedulers]
    order = 2

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.00085,
        beta_end: float = 0.012,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        prediction_type: str = "epsilon",
        use_karras_sigmas: Optional[bool] = False,
        clip_sample: Optional[bool] = False,
        clip_sample_range: float = 1.0,
        timestep_spacing: str = "linspace",
        steps_offset: int = 0,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32)
        elif beta_schedule == "scaled_linear":
            # this schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="cosine")
        elif beta_schedule == "exp":
            self.betas = betas_for_alpha_bar(num_train_timesteps, alpha_transform_type="exp")
        else:
            raise NotImplementedError(f"{beta_schedule} is not implemented for {self.__class__}")

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # set all values
        self.set_timesteps(num_train_timesteps, None, num_train_timesteps)
        self.use_karras_sigmas = use_karras_sigmas

    def index_for_timestep(self, timestep, schedule_timesteps=None):
        if schedule_timesteps is None:
            schedule_timesteps = self.timesteps

        indices = (schedule_timesteps == timestep).nonzero()

        # The sigma index that is taken for the **very** first `step`
        # is always the second index (or the last index if there is only 1)
        # This way we can ensure we don't accidentally skip a sigma in
        # case we start in the middle of the denoising schedule (e.g. for image-to-image)
        if len(self._index_counter) == 0:
            pos = 1 if len(indices) > 1 else 0
        else:
            timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
            pos = self._index_counter[timestep_int]

        return indices[pos].item()

    @property
    def init_noise_sigma(self):
        # standard deviation of the initial noise distribution
        if self.config.timestep_spacing in ["linspace", "trailing"]:
            return self.sigmas.max()

        return (self.sigmas.max() ** 2 + 1) ** 0.5

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor]):
        step_index = self.index_for_timestep(timestep)

        sigma = self.sigmas[step_index]
        sample = sample / ((sigma**2 + 1) ** 0.5)
        return sample

    def set_timesteps(
        self,
        num_inference_steps: int,
        device: Union[str, torch.device] = None,
        num_train_timesteps: Optional[int] = None,
    ):
        self.num_inference_steps = num_inference_steps

        num_train_timesteps = num_train_timesteps or self.config.num_train_timesteps

        # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
        if self.config.timestep_spacing == "linspace":
            timesteps = np.linspace(0, num_train_timesteps - 1, num_inference_steps, dtype=float)[::-1].copy()
        elif self.config.timestep_spacing == "leading":
            step_ratio = num_train_timesteps // self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float)
            timesteps += self.config.steps_offset
        elif self.config.timestep_spacing == "trailing":
            step_ratio = num_train_timesteps / self.num_inference_steps
            # creates integer timesteps by multiplying by ratio
            # casting to int to avoid issues when num_inference_step is power of 3
            timesteps = (np.arange(num_train_timesteps, 0, -step_ratio)).round().copy().astype(float)
            timesteps -= 1
        else:
            raise ValueError(
                f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'."
            )

        sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5)
        log_sigmas = np.log(sigmas)
        sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas)

        if self.config.use_karras_sigmas:
            sigmas = self._convert_to_karras(in_sigmas=sigmas, num_inference_steps=self.num_inference_steps)
            timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas])

        sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32)
        sigmas = torch.from_numpy(sigmas).to(device=device)
        self.sigmas = torch.cat([sigmas[:1], sigmas[1:-1].repeat_interleave(2), sigmas[-1:]])

        timesteps = torch.from_numpy(timesteps)
        timesteps = torch.cat([timesteps[:1], timesteps[1:].repeat_interleave(2)])

        if str(device).startswith("mps"):
            # mps does not support float64
            self.timesteps = timesteps.to(device, dtype=torch.float32)
        else:
            self.timesteps = timesteps.to(device=device)

        # empty dt and derivative
        self.prev_derivative = None
        self.dt = None

        # for exp beta schedules, such as the one for `pipeline_shap_e.py`
        # we need an index counter
        self._index_counter = defaultdict(int)

    def _sigma_to_t(self, sigma, log_sigmas):
        # get log sigma
        log_sigma = np.log(sigma)

        # get distribution
        dists = log_sigma - log_sigmas[:, np.newaxis]

        # get sigmas range
        low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2)
        high_idx = low_idx + 1

        low = log_sigmas[low_idx]
        high = log_sigmas[high_idx]

        # interpolate sigmas
        w = (low - log_sigma) / (low - high)
        w = np.clip(w, 0, 1)

        # transform interpolation to time range
        t = (1 - w) * low_idx + w * high_idx
        t = t.reshape(sigma.shape)
        return t

    def _convert_to_karras(self, in_sigmas: torch.FloatTensor, num_inference_steps):
        """Constructs the noise schedule of Karras et al. (2022)."""
        sigma_min = in_sigmas[-1].item()
        sigma_max = in_sigmas[0].item()

        rho = 7.0  # 7.0 is the value used in the paper
        ramp = np.linspace(0, 1, num_inference_steps)
        min_inv_rho = sigma_min ** (1 / rho)
        max_inv_rho = sigma_max ** (1 / rho)
        sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho
        return sigmas

    @property
    def state_in_first_order(self):
        return self.dt is None

    def step(
        self,
        model_output: Union[torch.FloatTensor, np.ndarray],
        timestep: Union[float, torch.FloatTensor],
        sample: Union[torch.FloatTensor, np.ndarray],
        return_dict: bool = True,
    ) -> Union[SchedulerOutput, Tuple]:
        step_index = self.index_for_timestep(timestep)

        # advance index counter by 1
        timestep_int = timestep.cpu().item() if torch.is_tensor(timestep) else timestep
        self._index_counter[timestep_int] += 1

        if self.state_in_first_order:
            sigma = self.sigmas[step_index]
            sigma_next = self.sigmas[step_index + 1]
        else:
            # 2nd order / Heun's method
            sigma = self.sigmas[step_index - 1]
            sigma_next = self.sigmas[step_index]

        # currently only gamma=0 is supported. This usually works best anyways.
        # We can support gamma in the future but then need to scale the timestep before
        # passing it to the model which requires a change in API
        gamma = 0
        sigma_hat = sigma * (gamma + 1)  # Note: sigma_hat == sigma for now

        # 1. compute predicted original sample (x_0) from sigma-scaled predicted noise
        if self.config.prediction_type == "epsilon":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = sample - sigma_input * model_output
        elif self.config.prediction_type == "v_prediction":
            sigma_input = sigma_hat if self.state_in_first_order else sigma_next
            pred_original_sample = model_output * (-sigma_input / (sigma_input**2 + 1) ** 0.5) + (
                sample / (sigma_input**2 + 1)
            )
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`"
            )

        if self.config.clip_sample:
            pred_original_sample = pred_original_sample.clamp(
                -self.config.clip_sample_range, self.config.clip_sample_range
            )

        if self.state_in_first_order:
            # 2. Convert to an ODE derivative for 1st order
            derivative = (sample - pred_original_sample) / sigma_hat
            # 3. delta timestep
            dt = sigma_next - sigma_hat

            # store for 2nd order step
            self.prev_derivative = derivative
            self.dt = dt
            self.sample = sample
        else:
            # 2. 2nd order / Heun's method
            derivative = (sample - pred_original_sample) / sigma_next
            derivative = (self.prev_derivative + derivative) / 2

            # 3. take prev timestep & sample
            dt = self.dt
            sample = self.sample

            # free dt and derivative
            # Note, this puts the scheduler in "first order mode"
            self.prev_derivative = None
            self.dt = None
            self.sample = None

        prev_sample = sample + derivative * dt

        if not return_dict:
            return (prev_sample,)

        return SchedulerOutput(prev_sample=prev_sample)

    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.FloatTensor,
    ) -> torch.FloatTensor:
        # Make sure sigmas and timesteps have the same device and dtype as original_samples
        sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype)
        if original_samples.device.type == "mps" and torch.is_floating_point(timesteps):
            # mps does not support float64
            schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32)
            timesteps = timesteps.to(original_samples.device, dtype=torch.float32)
        else:
            schedule_timesteps = self.timesteps.to(original_samples.device)
            timesteps = timesteps.to(original_samples.device)

        step_indices = [self.index_for_timestep(t, schedule_timesteps) for t in timesteps]

        sigma = sigmas[step_indices].flatten()
        while len(sigma.shape) < len(original_samples.shape):
            sigma = sigma.unsqueeze(-1)

        noisy_samples = original_samples + noise * sigma
        return noisy_samples

    def __len__(self):
        return self.config.num_train_timesteps
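# Usage sketch (illustrative, not part of this module). Each timestep appears
# twice in `scheduler.timesteps` (Euler predictor, then Heun corrector), so the
# loop body alternates between the two phases. `_toy_model` is a stand-in for a
# real epsilon-predicting UNet, which is an assumption for this sketch.
def _heun_demo():
    def _toy_model(x, t):
        return torch.zeros_like(x)

    scheduler = HeunDiscreteScheduler()
    scheduler.set_timesteps(num_inference_steps=25)
    sample = torch.randn(1, 4, 8, 8) * scheduler.init_noise_sigma
    for t in scheduler.timesteps:
        model_input = scheduler.scale_model_input(sample, t)
        noise_pred = _toy_model(model_input, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample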
| 705 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_sew"] = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
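# Added note (illustrative): the sys.modules swap above makes this package
# lazy. The torch-backed classes are only materialized on first attribute
# access, so e.g.
#     from transformers import SEWConfig
# stays cheap even before the heavy modeling file has been imported.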
| 13 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a = {"configuration_opt": ["OPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "OPTConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_opt"] = [
"OPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"OPTForCausalLM",
"OPTModel",
"OPTPreTrainedModel",
"OPTForSequenceClassification",
"OPTForQuestionAnswering",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["TFOPTForCausalLM", "TFOPTModel", "TFOPTPreTrainedModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_opt"] = [
"FlaxOPTForCausalLM",
"FlaxOPTModel",
"FlaxOPTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_opt import OPT_PRETRAINED_CONFIG_ARCHIVE_MAP, OPTConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_opt import (
OPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OPTForCausalLM,
OPTForQuestionAnswering,
OPTForSequenceClassification,
OPTModel,
OPTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_opt import TFOPTForCausalLM, TFOPTModel, TFOPTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_opt import FlaxOPTForCausalLM, FlaxOPTModel, FlaxOPTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 706 |
'''simple docstring'''
import requests
from bs4 import BeautifulSoup


def stock_price(symbol: str = "AAPL") -> str:
    """Scrape the current stock price for `symbol` from Yahoo Finance."""
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    soup = BeautifulSoup(requests.get(url).text, "html.parser")
    class_ = "My(6px) Pos(r) smartphone_Mt(6px)"
    return soup.find("div", class_=class_).find("span").text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 13 | 0 |
'''simple docstring'''
def mean_absolute_deviation(nums: list) -> float:
    """Return the mean absolute deviation of the numbers in `nums`."""
    if not nums:  # Makes sure that the list is not empty
        raise ValueError("List is empty")

    average = sum(nums) / len(nums)  # Calculate the average
    return sum(abs(x - average) for x in nums) / len(nums)
if __name__ == "__main__":
import doctest
doctest.testmod()
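# Worked example (added, illustrative): for [1, 2, 3, 4] the mean is 2.5, so
# the absolute deviations are 1.5, 0.5, 0.5, 1.5 and their average is 1.0.
assert mean_absolute_deviation([1, 2, 3, 4]) == 1.0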
| 707 |
'''simple docstring'''
def is_palindrome_number(num: int) -> bool:
    """Return True if `num` reads the same forwards and backwards."""
    if num < 0:
        return False

    num_copy = num
    rev_num = 0
    while num > 0:
        rev_num = rev_num * 10 + (num % 10)
        num //= 10

    return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
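# Worked examples (added, illustrative):
assert is_palindrome_number(121) is True
assert is_palindrome_number(-121) is False  # the sign breaks the symmetry
assert is_palindrome_number(10) is False    # reverses to 1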
| 13 | 0 |
'''simple docstring'''
from datetime import datetime
import matplotlib.pyplot as plt
import torch
def freeze_params(module) -> None:
    """Disable gradient tracking for every parameter of `module`."""
    for param in module.parameters():
        param.requires_grad = False


def get_device() -> str:
    """Pick the best available device, with a warning for the flaky MPS backend."""
    device = "cuda" if torch.cuda.is_available() else "cpu"
    if torch.backends.mps.is_available() and torch.backends.mps.is_built():
        device = "mps"
    if device == "mps":
        print(
            "WARNING: MPS currently doesn't seem to work, and messes up backpropagation without any visible torch"
            " errors. I recommend using CUDA on a colab notebook or CPU instead if you're facing inexplicable issues"
            " with generations."
        )
    return device


def show_image(image) -> None:
    """Display `image` with matplotlib, hiding both axes."""
    fig = plt.imshow(image)
    fig.axes.get_xaxis().set_visible(False)
    fig.axes.get_yaxis().set_visible(False)
    plt.show()


def get_timestamp() -> str:
    """Return the current wall-clock time as HH:MM:SS."""
    current_time = datetime.now()
    timestamp = current_time.strftime("%H:%M:%S")
    return timestamp
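# Usage sketch (added, illustrative): freezing a layer flips requires_grad off
# for every parameter, so an optimizer will skip it during backpropagation.
_layer = torch.nn.Linear(2, 2)
freeze_params(_layer)
assert all(not p.requires_grad for p in _layer.parameters())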
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
Matrix = list[list[float | int]]


def solve(matrix: Matrix, vector: Matrix) -> Matrix:
    """
    Solve the linear system Ax = b via Gaussian elimination with partial
    pivoting, followed by back substitution.
    """
    size: int = len(matrix)
    augmented: Matrix = [[0 for _ in range(size + 1)] for _ in range(size)]
    row: int
    row2: int
    col: int
    col2: int
    pivot_row: int
    ratio: float

    # copy the matrix and the target vector into a single augmented matrix
    for row in range(size):
        for col in range(size):
            augmented[row][col] = matrix[row][col]

        augmented[row][size] = vector[row][0]

    row = 0
    col = 0
    while row < size and col < size:
        # pivoting
        pivot_row = max((abs(augmented[row2][col]), row2) for row2 in range(row, size))[
            1
        ]
        if augmented[pivot_row][col] == 0:
            col += 1
            continue
        else:
            augmented[row], augmented[pivot_row] = augmented[pivot_row], augmented[row]

        for row2 in range(row + 1, size):
            ratio = augmented[row2][col] / augmented[row][col]
            augmented[row2][col] = 0
            for col2 in range(col + 1, size + 1):
                augmented[row2][col2] -= augmented[row][col2] * ratio

        row += 1
        col += 1

    # back substitution
    for col in range(1, size):
        for row in range(col):
            ratio = augmented[row][col] / augmented[col][col]
            for col2 in range(col, size + 1):
                augmented[row][col2] -= augmented[col][col2] * ratio

    # round to get rid of numbers like 2.000000000000004
    return [
        [round(augmented[row][size] / augmented[row][row], 10)] for row in range(size)
    ]


def interpolate(y_list: list[int]) -> Callable[[int], int]:
    """
    Given values y_1 .. y_n, return the unique polynomial of degree n - 1 that
    passes through (1, y_1), ..., (n, y_n).
    """
    size: int = len(y_list)
    matrix: Matrix = [[0 for _ in range(size)] for _ in range(size)]
    vector: Matrix = [[0] for _ in range(size)]
    coeffs: Matrix
    x_val: int
    y_val: int
    col: int

    for x_val, y_val in enumerate(y_list):
        for col in range(size):
            matrix[x_val][col] = (x_val + 1) ** (size - col - 1)

        vector[x_val][0] = y_val

    coeffs = solve(matrix, vector)

    def interpolated_func(var: int) -> int:
        return sum(
            round(coeffs[x_val][0]) * (var ** (size - x_val - 1))
            for x_val in range(size)
        )

    return interpolated_func


def question_function(variable: int) -> int:
    """The generating function u(n) from Project Euler problem 101."""
    return (
        1
        - variable
        + variable**2
        - variable**3
        + variable**4
        - variable**5
        + variable**6
        - variable**7
        + variable**8
        - variable**9
        + variable**10
    )


def solution(func: Callable[[int], int] = question_function, order: int = 10) -> int:
    """
    Sum the first incorrect terms (FITs) of the optimal polynomial
    approximations of increasing degree.
    """
    data_points: list[int] = [func(x_val) for x_val in range(1, order + 1)]
    polynomials: list[Callable[[int], int]] = [
        interpolate(data_points[:max_coeff]) for max_coeff in range(1, order + 1)
    ]

    ret: int = 0
    poly: Callable[[int], int]
    x_val: int

    for poly in polynomials:
        x_val = 1
        while func(x_val) == poly(x_val):
            x_val += 1

        ret += poly(x_val)

    return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
| 13 | 0 |
'''simple docstring'''
from graphs.minimum_spanning_tree_kruskal import kruskal
def test_kruskal_successful_result():
    num_nodes = 9
    edges = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 14],
[3, 4, 9],
[5, 4, 10],
[1, 7, 11],
]
    result = kruskal(num_nodes, edges)

    expected = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
    assert sorted(expected) == sorted(result)
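# Added note (illustrative): the MSTs are compared as sorted edge lists because
# Kruskal may emit edges in any order. A complementary invariant is the total
# weight, which is unique for this graph even if the chosen edges were not.
def test_mst_total_weight():
    expected_weight = 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9  # weights of the MST edges above
    assert expected_weight == 37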
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero-shot image classification pipeline: predicts the class of an image
    from candidate labels supplied at call time, using a CLIP-style model.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}

    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs

    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs

    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
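# Usage sketch (added, illustrative; not part of this module). The task string
# is the one this pipeline registers under; the checkpoint is the standard
# public CLIP model and downloading it requires network access.
def _zero_shot_demo():
    from transformers import pipeline

    classifier = pipeline(
        "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
    )
    return classifier(
        "http://images.cocodataset.org/val2017/000000039769.jpg",
        candidate_labels=["a photo of cats", "a photo of a dog"],
    )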
| 13 | 0 |
'''simple docstring'''
import shutil
import tempfile
import unittest
from transformers import (
SPIECE_UNDERLINE,
AddedToken,
BatchEncoding,
NllbTokenizer,
NllbTokenizerFast,
is_torch_available,
)
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
if is_torch_available():
    from transformers.models.m2m_100.modeling_m2m_100 import shift_tokens_right
EN_CODE = 256047
RO_CODE = 256145
@require_sentencepiece
@require_tokenizers
class NllbTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = NllbTokenizer
    rust_tokenizer_class = NllbTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    from_pretrained_kwargs = {}

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = NllbTokenizer(SAMPLE_VOCAB, keep_accents=True)
        tokenizer.save_pretrained(self.tmpdirname)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = NllbTokenizer(lowerCamelCase ,keep_accents=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""This is a test""" )
self.assertListEqual(lowerCamelCase ,["""▁This""", """▁is""", """▁a""", """▁t""", """est"""] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCamelCase ) ,[value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] ,)
__SCREAMING_SNAKE_CASE = tokenizer.tokenize("""I was born in 92000, and this is falsé.""" )
self.assertListEqual(
lowerCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""9""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""é""",
""".""",
] ,)
__SCREAMING_SNAKE_CASE = tokenizer.convert_tokens_to_ids(lowerCamelCase )
self.assertListEqual(
lowerCamelCase ,[
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, 2, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 2, 4]
] ,)
__SCREAMING_SNAKE_CASE = tokenizer.convert_ids_to_tokens(lowerCamelCase )
self.assertListEqual(
lowerCamelCase ,[
SPIECE_UNDERLINE + """I""",
SPIECE_UNDERLINE + """was""",
SPIECE_UNDERLINE + """b""",
"""or""",
"""n""",
SPIECE_UNDERLINE + """in""",
SPIECE_UNDERLINE + """""",
"""<unk>""",
"""2""",
"""0""",
"""0""",
"""0""",
""",""",
SPIECE_UNDERLINE + """and""",
SPIECE_UNDERLINE + """this""",
SPIECE_UNDERLINE + """is""",
SPIECE_UNDERLINE + """f""",
"""al""",
"""s""",
"""<unk>""",
""".""",
] ,)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (self.rust_tokenizer_class, """hf-internal-testing/tiny-random-nllb""", {})
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it save with the same files + the tokenizer.json file for the fast one
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
__SCREAMING_SNAKE_CASE = tuple(f for f in tokenizer_r_files if """tokenizer.json""" not in f )
self.assertSequenceEqual(lowerCamelCase ,lowerCamelCase )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase ,lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=True
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase ,legacy_format=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it save with the same files
self.assertSequenceEqual(lowerCamelCase ,lowerCamelCase )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase ,lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
# Save tokenizer rust, legacy_format=False
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = tokenizer_r.save_pretrained(lowerCamelCase ,legacy_format=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.save_pretrained(lowerCamelCase )
# Checks it saved the tokenizer.json file
self.assertTrue(any("""tokenizer.json""" in f for f in tokenizer_r_files ) )
# Checks everything loads correctly in the same way
__SCREAMING_SNAKE_CASE = tokenizer_r.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.from_pretrained(lowerCamelCase )
# Check special tokens are set accordingly on Rust and Python
for key in tokenizer_pp.special_tokens_map:
self.assertTrue(hasattr(lowerCamelCase ,lowerCamelCase ) )
shutil.rmtree(lowerCamelCase )
    @require_torch
    def test_prepare_seq2seq_batch(self):
        if not self.test_seq2seq:
            return

        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                # Longer text that will definitely require truncation.
                src_text = [
                    " UN Chief Says There Is No Military Solution in Syria",
                    " Secretary-General Ban Ki-moon says his response to Russia's stepped up military support for"
                    " Syria is that 'there is no military solution' to the nearly five-year conflict and more weapons"
                    " will only worsen the violence and misery for millions of people.",
                ]
                tgt_text = [
                    "Şeful ONU declară că nu există o soluţie militară în Siria",
                    "Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al"
                    " Rusiei pentru Siria este că \"nu există o soluţie militară\" la conflictul de aproape cinci ani şi"
                    " că noi arme nu vor face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.",
                ]
                try:
                    batch = tokenizer.prepare_seq2seq_batch(
                        src_texts=src_text,
                        tgt_texts=tgt_text,
                        max_length=3,
                        max_target_length=10,
                        return_tensors="pt",
                        src_lang="eng_Latn",
                        tgt_lang="ron_Latn",
                    )
                except NotImplementedError:
                    return
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 10)
                # max_target_length will default to max_length if not specified
                batch = tokenizer.prepare_seq2seq_batch(
                    src_text, tgt_texts=tgt_text, max_length=3, return_tensors="pt"
                )
                self.assertEqual(batch.input_ids.shape[1], 3)
                self.assertEqual(batch.labels.shape[1], 3)

                batch_encoder_only = tokenizer.prepare_seq2seq_batch(
                    src_texts=src_text, max_length=3, max_target_length=10, return_tensors="pt"
                )
                self.assertEqual(batch_encoder_only.input_ids.shape[1], 3)
                self.assertEqual(batch_encoder_only.attention_mask.shape[1], 3)
                self.assertNotIn("decoder_input_ids", batch_encoder_only)
@unittest.skip("""Unfortunately way too slow to build a BPE with SentencePiece.""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
__SCREAMING_SNAKE_CASE = [AddedToken("""<special>""" ,lstrip=lowerCamelCase )]
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase ,additional_special_tokens=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_r.encode("""Hey this is a <special> token""" )
__SCREAMING_SNAKE_CASE = tokenizer_r.encode("""<special>""" ,add_special_tokens=lowerCamelCase )[0]
self.assertTrue(special_token_id in r_output )
if self.test_slow_tokenizer:
__SCREAMING_SNAKE_CASE = self.rust_tokenizer_class.from_pretrained(
lowerCamelCase ,additional_special_tokens=lowerCamelCase ,**lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = self.tokenizer_class.from_pretrained(
lowerCamelCase ,additional_special_tokens=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer_p.encode("""Hey this is a <special> token""" )
__SCREAMING_SNAKE_CASE = tokenizer_cr.encode("""Hey this is a <special> token""" )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
self.assertTrue(special_token_id in p_output )
self.assertTrue(special_token_id in cr_output )
@require_torch
@require_sentencepiece
@require_tokenizers
class NllbDistilledIntegrationTest(unittest.TestCase):
    checkpoint_name = "facebook/nllb-200-distilled-600M"
    src_text = [
' UN Chief Says There Is No Military Solution in Syria',
' Secretary-General Ban Ki-moon says his response to Russia\'s stepped up military support for Syria is that "there is no military solution" to the nearly five-year conflict and more weapons will only worsen the violence and misery for millions of people.',
]
    tgt_text = [
'Şeful ONU declară că nu există o soluţie militară în Siria',
'Secretarul General Ban Ki-moon declară că răspunsul său la intensificarea sprijinului militar al Rusiei'
' pentru Siria este că "nu există o soluţie militară" la conflictul de aproape cinci ani şi că noi arme nu vor'
' face decât să înrăutăţească violenţele şi mizeria pentru milioane de oameni.',
]
    expected_src_tokens = [
25_6047,
1_6297,
13_4408,
8165,
24_8066,
1_4734,
950,
1135,
10_5721,
3573,
83,
2_7352,
108,
4_9486,
2,
]
    @classmethod
    def setUpClass(cls):
        cls.tokenizer = NllbTokenizer.from_pretrained(
            cls.checkpoint_name, src_lang="eng_Latn", tgt_lang="ron_Latn"
        )
        cls.pad_token_id = 1
        return cls
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Arab"""] ,25_6001 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""ace_Latn"""] ,25_6002 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["""fra_Latn"""] ,25_6057 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
self.assertIn(lowerCamelCase ,self.tokenizer.all_special_ids )
# fmt: off
__SCREAMING_SNAKE_CASE = [RO_CODE, 4254, 9_8068, 11_2923, 3_9072, 3909, 713, 10_2767, 26, 1_7314, 3_5642, 1_4683, 3_3118, 2022, 6_6987, 2, 25_6047]
# fmt: on
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(lowerCamelCase ,skip_special_tokens=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(generated_ids[1:] ,skip_special_tokens=lowerCamelCase )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
self.assertNotIn(self.tokenizer.eos_token ,lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ["""this is gunna be a long sentence """ * 20]
assert isinstance(src_text[0] ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = 10
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,max_length=lowerCamelCase ,truncation=lowerCamelCase ).input_ids[0]
self.assertEqual(ids[-1] ,2 )
self.assertEqual(ids[0] ,lowerCamelCase )
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["""<mask>""", """ar_AR"""] ) ,[25_6203, 3] )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = NllbTokenizer.from_pretrained(lowerCamelCase )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids ,lowerCamelCase )
@require_torch
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer(
self.src_text ,text_target=self.tgt_text ,padding=lowerCamelCase ,truncation=lowerCamelCase ,max_length=len(self.expected_src_tokens ) ,return_tensors="""pt""" ,)
__SCREAMING_SNAKE_CASE = shift_tokens_right(
batch["""labels"""] ,self.tokenizer.pad_token_id ,self.tokenizer.lang_code_to_id["""ron_Latn"""] )
self.assertIsInstance(lowerCamelCase ,lowerCamelCase )
self.assertEqual((2, 15) ,batch.input_ids.shape )
self.assertEqual((2, 15) ,batch.attention_mask.shape )
__SCREAMING_SNAKE_CASE = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens ,lowerCamelCase )
self.assertEqual(lowerCamelCase ,batch.decoder_input_ids[0, 0] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens ,[EN_CODE] )
self.assertEqual(self.tokenizer.suffix_tokens ,[self.tokenizer.eos_token_id] )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer(self.src_text ,padding=lowerCamelCase ,truncation=lowerCamelCase ,max_length=3 ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = self.tokenizer(
text_target=self.tgt_text ,padding=lowerCamelCase ,truncation=lowerCamelCase ,max_length=10 ,return_tensors="""pt""" )
__SCREAMING_SNAKE_CASE = targets["""input_ids"""]
__SCREAMING_SNAKE_CASE = shift_tokens_right(
lowerCamelCase ,self.tokenizer.pad_token_id ,decoder_start_token_id=self.tokenizer.lang_code_to_id[self.tokenizer.tgt_lang] ,)
self.assertEqual(batch.input_ids.shape[1] ,3 )
self.assertEqual(batch.decoder_input_ids.shape[1] ,10 )
@require_torch
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer._build_translation_inputs(
"""A test""" ,return_tensors="""pt""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" )
self.assertEqual(
nested_simplify(lowerCamelCase ) ,{
# A, test, EOS, en_XX
"""input_ids""": [[25_6047, 70, 7356, 2]],
"""attention_mask""": [[1, 1, 1, 1]],
# ar_AR
"""forced_bos_token_id""": 25_6057,
} ,)
@require_torch
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids ,[1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2, 25_6047] )
__SCREAMING_SNAKE_CASE = False
__SCREAMING_SNAKE_CASE = self.tokenizer(
"""UN Chief says there is no military solution in Syria""" ,src_lang="""eng_Latn""" ,tgt_lang="""fra_Latn""" )
self.assertEqual(
inputs.input_ids ,[25_6047, 1_6297, 13_4408, 2_5653, 6370, 248, 254, 10_3929, 9_4995, 108, 4_9486, 2] )
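# Usage sketch (added, illustrative; not part of the test suite). Mirrors what
# the integration tests above exercise; downloading the checkpoint requires
# network access.
def _nllb_quick_demo():
    tok = NllbTokenizer.from_pretrained(
        "facebook/nllb-200-distilled-600M", src_lang="eng_Latn", tgt_lang="fra_Latn"
    )
    return tok("UN Chief says there is no military solution in Syria", return_tensors="pt")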
| 710 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
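# Example (added, illustrative): validate a single pinned dependency on demand.
# `dep_version_check` looks up the pinned range in dependency_versions_table.py.
dep_version_check("numpy", "numpy is required by several array utilities")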
| 13 | 0 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    """Return the sum of all the multiples of 3 or 5 below `n`."""
    a = 3
    result = 0
    while a < n:
        if a % 3 == 0 or a % 5 == 0:
            result += a
        elif a % 15 == 0:
            result -= a
        a += 1
    return result
if __name__ == "__main__":
print(F'''{solution() = }''')
| 711 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
a = logging.getLogger(__name__)
@dataclass
class DataTrainingArguments:
    """Arguments pertaining to what data we are going to input our model for training and eval."""

    dataset_name: Optional[str] = field(
        default="tab_fact", metadata={"help": "The name of the dataset to use (via the datasets library)."}
    )
    dataset_config_name: Optional[str] = field(
        default="tab_fact",
        metadata={"help": "The configuration name of the dataset to use (via the datasets library)."},
    )
    max_seq_length: int = field(
        default=1024,
        metadata={
            "help": (
                "The maximum total input sequence length after tokenization. Sequences longer "
                "than this will be truncated, sequences shorter will be padded."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached preprocessed datasets or not."}
    )
    pad_to_max_length: bool = field(
        default=False,
        metadata={
            "help": (
                "Whether to pad all samples to `max_seq_length`. "
                "If False, will pad the samples dynamically when batching to the maximum length in the batch."
            )
        },
    )
    max_train_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of training examples to this "
                "value if set."
            )
        },
    )
    max_eval_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of evaluation examples to this "
                "value if set."
            )
        },
    )
    max_predict_samples: Optional[int] = field(
        default=None,
        metadata={
            "help": (
                "For debugging purposes or quicker training, truncate the number of prediction examples to this "
                "value if set."
            )
        },
    )
    train_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the training data."}
    )
    validation_file: Optional[str] = field(
        default=None, metadata={"help": "A csv or a json file containing the validation data."}
    )
    test_file: Optional[str] = field(default=None, metadata={"help": "A csv or a json file containing the test data."})

    def __post_init__(self):
        if self.dataset_name is not None:
            pass
        elif self.train_file is None or self.validation_file is None:
            raise ValueError("Need either a GLUE task, a training/validation file or a dataset name.")
        else:
            train_extension = self.train_file.split(".")[-1]
            assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
            validation_extension = self.validation_file.split(".")[-1]
            assert (
                validation_extension == train_extension
            ), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class ModelArguments:
    """Arguments pertaining to which model/config/tokenizer we are going to fine-tune from."""

    model_name_or_path: str = field(
        metadata={"help": "Path to pretrained model or model identifier from huggingface.co/models"}
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None,
        metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
    use_fast_tokenizer: bool = field(
        default=True,
        metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."},
    )
    model_revision: str = field(
        default="main",
        metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."},
    )
    use_auth_token: bool = field(
        default=False,
        metadata={
            "help": (
                "Will use the token generated when running `huggingface-cli login` (necessary to use this script "
                "with private models)."
            )
        },
    )
def main():
    # See all possible arguments by passing the --help flag to this script.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
    log_level = training_args.get_process_log_level()
    logger.setLevel(log_level)
    datasets.utils.logging.set_verbosity(log_level)
    transformers.utils.logging.set_verbosity(log_level)
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
    last_checkpoint = None
    if os.path.isdir(training_args.output_dir) and training_args.do_train and not training_args.overwrite_output_dir:
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
        raw_datasets = load_dataset(
            data_args.dataset_name, data_args.dataset_config_name, cache_dir=model_args.cache_dir
        )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
                train_extension = data_args.train_file.split(".")[-1]
                test_extension = data_args.test_file.split(".")[-1]
                assert (
                    test_extension == train_extension
                ), "`test_file` should have the same extension (csv or json) as `train_file`."
                data_files["test"] = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
        if data_args.train_file.endswith(".csv"):
            # Loading a dataset from local csv files
            raw_datasets = load_dataset("csv", data_files=data_files, cache_dir=model_args.cache_dir)
        else:
            # Loading a dataset from local json files
            raw_datasets = load_dataset("json", data_files=data_files, cache_dir=model_args.cache_dir)
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
    config = AutoConfig.from_pretrained(
        model_args.config_name if model_args.config_name else model_args.model_name_or_path,
        num_labels=num_labels,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
    # load tapex tokenizer
    tokenizer = TapexTokenizer.from_pretrained(
        model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path,
        cache_dir=model_args.cache_dir,
        use_fast=model_args.use_fast_tokenizer,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
        add_prefix_space=True,
    )
    model = BartForSequenceClassification.from_pretrained(
        model_args.model_name_or_path,
        from_tf=bool(".ckpt" in model_args.model_name_or_path),
        config=config,
        cache_dir=model_args.cache_dir,
        revision=model_args.model_revision,
        use_auth_token=True if model_args.use_auth_token else None,
    )
# Padding strategy
    if data_args.pad_to_max_length:
        padding = "max_length"
    else:
        # We will pad later, dynamically at batch creation, to the max sequence length in each batch
        padding = False

    # Some models have set the order of the labels to use, so let's make sure we do use it.
    model.config.label2id = {"Refused": 0, "Entailed": 1}
    model.config.id2label = {0: "Refused", 1: "Entailed"}

    if data_args.max_seq_length > tokenizer.model_max_length:
        logger.warning(
            f"The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"
            f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
        )
    max_seq_length = min(data_args.max_seq_length, tokenizer.model_max_length)
def preprocess_tabfact_function(__UpperCAmelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
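        # Worked example of the helper above (hypothetical table string): "col1#col2\nabc#1\ndef#2"
        # becomes a DataFrame with columns ["col1", "col2"] and rows ["abc", "1"], ["def", "2"].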
__SCREAMING_SNAKE_CASE = examples["""statement"""]
__SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = examples["""label"""]
return result
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE = raw_datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 )
else:
__SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
        # Removing the `label` column because it contains -1 and Trainer won't like that.
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
__SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
__SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
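# Example launch (hypothetical paths/arguments) for the fine-tuning script above:
#   python run_tabfact.py --model_name_or_path microsoft/tapex-base --dataset_name tab_fact \
#     --do_train --do_eval --output_dir ./tapex-tabfact-out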
| 13 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"naver-clova-ix/donut-base": "https://huggingface.co/naver-clova-ix/donut-base/resolve/main/config.json",
# See all Donut models at https://huggingface.co/models?filter=donut-swin
}
class __a ( _snake_case ):
__UpperCamelCase : Union[str, Any] = 'donut-swin'
__UpperCamelCase : Dict = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : List[str] ,lowerCamelCase : List[Any]=224 ,lowerCamelCase : List[str]=4 ,lowerCamelCase : str=3 ,lowerCamelCase : Tuple=96 ,lowerCamelCase : Dict=[2, 2, 6, 2] ,lowerCamelCase : str=[3, 6, 12, 24] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : Dict=4.0 ,lowerCamelCase : str=True ,lowerCamelCase : Union[str, Any]=0.0 ,lowerCamelCase : Union[str, Any]=0.0 ,lowerCamelCase : str=0.1 ,lowerCamelCase : Dict="gelu" ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : Dict=0.02 ,lowerCamelCase : List[Any]=1E-5 ,**lowerCamelCase : Any ,):
'''simple docstring'''
super().__init__(**lowerCamelCase )
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embed_dim
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = len(lowerCamelCase )
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = window_size
__SCREAMING_SNAKE_CASE = mlp_ratio
__SCREAMING_SNAKE_CASE = qkv_bias
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = use_absolute_embeddings
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
__SCREAMING_SNAKE_CASE = int(embed_dim * 2 ** (len(lowerCamelCase ) - 1) )
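# Sanity check for the derived dimension above (a sketch; `__a` is the Donut Swin config class):
# with the defaults embed_dim=96 and depths=[2, 2, 6, 2], the last stage has
# 96 * 2 ** (4 - 1) = 768 channels, which VisionEncoderDecoderModel reads as the hidden size.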
| 712 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
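# Note on the guard pattern above: importing the package never hard-fails when torch or
# transformers is absent; the dummy objects pulled in by the except-branch raise an
# informative dependency error only when one of the pipelines is actually used.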
| 13 | 0 |
'''simple docstring'''
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
WavaVecaConfig,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaForCTC,
WavaVecaForPreTraining,
WavaVecaProcessor,
logging,
)
from transformers.models.wavaveca.modeling_wavaveca import WavaVecaForSequenceClassification
logging.set_verbosity_info()
a = logging.get_logger(__name__)
a = {
"post_extract_proj": "feature_projection.projection",
"encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
"self_attn.k_proj": "encoder.layers.*.attention.k_proj",
"self_attn.v_proj": "encoder.layers.*.attention.v_proj",
"self_attn.q_proj": "encoder.layers.*.attention.q_proj",
"self_attn.out_proj": "encoder.layers.*.attention.out_proj",
"self_attn_layer_norm": "encoder.layers.*.layer_norm",
"fc1": "encoder.layers.*.feed_forward.intermediate_dense",
"fc2": "encoder.layers.*.feed_forward.output_dense",
"final_layer_norm": "encoder.layers.*.final_layer_norm",
"encoder.layer_norm": "encoder.layer_norm",
"adapter_layer": "encoder.layers.*.adapter_layer",
"w2v_model.layer_norm": "feature_projection.layer_norm",
"quantizer.weight_proj": "quantizer.weight_proj",
"quantizer.vars": "quantizer.codevectors",
"project_q": "project_q",
"final_proj": "project_hid",
"w2v_encoder.proj": "lm_head",
"mask_emb": "masked_spec_embed",
"pooling_layer.linear": "projector",
"pooling_layer.projection": "classifier",
}
a = [
"lm_head",
"quantizer.weight_proj",
"quantizer.codevectors",
"project_q",
"project_hid",
"projector",
"classifier",
]
def __magic_name__ ( __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
with open(__UpperCAmelCase , """r""" ) as file:
for line_number, line in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = line.strip()
if line:
__SCREAMING_SNAKE_CASE = line.split()
__SCREAMING_SNAKE_CASE = line_number
__SCREAMING_SNAKE_CASE = words[0]
__SCREAMING_SNAKE_CASE = value
return result
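# Intended behavior of the reader above (the dict store `result[key] = value` is elided in
# this copy): a label file with the lines "down", "up", "left" maps line numbers to first
# tokens, i.e. {0: "down", 1: "up", 2: "left"}, later installed as the model's id2label.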
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
for attribute in key.split(""".""" ):
__SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__SCREAMING_SNAKE_CASE = """param"""
if weight_type is not None and weight_type != "param":
__SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase ).shape
elif weight_type is not None and weight_type == "param":
__SCREAMING_SNAKE_CASE = hf_pointer
for attribute in hf_param_name.split(""".""" ):
__SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = shape_pointer.shape
# let's reduce dimension
__SCREAMING_SNAKE_CASE = value[0]
else:
__SCREAMING_SNAKE_CASE = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
f"""Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"""
f""" {value.shape} for {full_name}""" )
if weight_type == "weight":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_g":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "weight_v":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "bias":
__SCREAMING_SNAKE_CASE = value
elif weight_type == "param":
for attribute in hf_param_name.split(""".""" ):
__SCREAMING_SNAKE_CASE = getattr(__UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
else:
__SCREAMING_SNAKE_CASE = value
logger.info(f"""{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = None
for param_key in PARAM_MAPPING.keys():
if full_name.endswith(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = PARAM_MAPPING[full_name.split(""".""" )[-1]]
__SCREAMING_SNAKE_CASE = """param"""
if weight_type is not None and weight_type != "param":
__SCREAMING_SNAKE_CASE = """.""".join([key, weight_type] )
elif weight_type is not None and weight_type == "param":
__SCREAMING_SNAKE_CASE = """.""".join([key, hf_param_name] )
else:
__SCREAMING_SNAKE_CASE = key
__SCREAMING_SNAKE_CASE = value if """lm_head""" in full_key else value[0]
a = {
"W_a": "linear_1.weight",
"W_b": "linear_2.weight",
"b_a": "linear_1.bias",
"b_b": "linear_2.bias",
"ln_W": "norm.weight",
"ln_b": "norm.bias",
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = False
for key, mapped_key in MAPPING.items():
__SCREAMING_SNAKE_CASE = """wav2vec2.""" + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("""w2v_model.""" )[-1] == name.split(""".""" )[0]:
__SCREAMING_SNAKE_CASE = True
if "*" in mapped_key:
__SCREAMING_SNAKE_CASE = name.split(__UpperCAmelCase )[0].split(""".""" )[-2]
__SCREAMING_SNAKE_CASE = mapped_key.replace("""*""" , __UpperCAmelCase )
if "weight_g" in name:
__SCREAMING_SNAKE_CASE = """weight_g"""
elif "weight_v" in name:
__SCREAMING_SNAKE_CASE = """weight_v"""
elif "bias" in name:
__SCREAMING_SNAKE_CASE = """bias"""
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
__SCREAMING_SNAKE_CASE = """weight"""
else:
__SCREAMING_SNAKE_CASE = None
if hf_dict is not None:
rename_dict(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
else:
set_recursively(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
return is_used
return is_used
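# Example of the wildcard expansion above (hypothetical fairseq key): a weight named
# "encoder.layers.3.self_attn.k_proj.weight" matches the "self_attn.k_proj" entry, the layer
# index "3" is spliced into "encoder.layers.*.attention.k_proj", and weight_type is "weight".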
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = fairseq_model.state_dict()
__SCREAMING_SNAKE_CASE = hf_model.wavaveca.feature_extractor
for name, value in fairseq_dict.items():
__SCREAMING_SNAKE_CASE = False
if "conv_layers" in name:
load_conv_layer(
__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , hf_model.config.feat_extract_norm == """group""" , )
__SCREAMING_SNAKE_CASE = True
else:
__SCREAMING_SNAKE_CASE = load_wavaveca_layer(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
if not is_used:
unused_weights.append(__UpperCAmelCase )
logger.warning(f"""Unused weights: {unused_weights}""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = full_name.split("""conv_layers.""" )[-1]
__SCREAMING_SNAKE_CASE = name.split(""".""" )
__SCREAMING_SNAKE_CASE = int(items[0] )
__SCREAMING_SNAKE_CASE = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.""" )
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.""" )
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract conv layer {layer_id} was initialized from {full_name}.""" )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.""" )
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
f"""{full_name} has size {value.shape}, but"""
f""" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.""" )
__SCREAMING_SNAKE_CASE = value
logger.info(f"""Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.""" )
else:
unused_weights.append(__UpperCAmelCase )
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=None , __UpperCAmelCase=True , __UpperCAmelCase=False ) -> List[Any]:
'''simple docstring'''
if config_path is not None:
__SCREAMING_SNAKE_CASE = WavaVecaConfig.from_pretrained(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = WavaVecaConfig()
if is_seq_class:
__SCREAMING_SNAKE_CASE = read_txt_into_dict(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = idalabel
__SCREAMING_SNAKE_CASE = WavaVecaForSequenceClassification(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
feature_extractor.save_pretrained(__UpperCAmelCase )
elif is_finetuned:
if dict_path:
__SCREAMING_SNAKE_CASE = Dictionary.load(__UpperCAmelCase )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
__SCREAMING_SNAKE_CASE = target_dict.pad_index
__SCREAMING_SNAKE_CASE = target_dict.bos_index
__SCREAMING_SNAKE_CASE = target_dict.eos_index
__SCREAMING_SNAKE_CASE = len(target_dict.symbols )
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , """vocab.json""" )
if not os.path.isdir(__UpperCAmelCase ):
logger.error("""--pytorch_dump_folder_path ({}) should be a directory""".format(__UpperCAmelCase ) )
return
os.makedirs(__UpperCAmelCase , exist_ok=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = target_dict.indices
# fairseq has the <pad> and <s> switched
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 1
with open(__UpperCAmelCase , """w""" , encoding="""utf-8""" ) as vocab_handle:
json.dump(__UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = WavaVecaCTCTokenizer(
__UpperCAmelCase , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token="""|""" , do_lower_case=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = True if config.feat_extract_norm == """layer""" else False
__SCREAMING_SNAKE_CASE = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=16000 , padding_value=0 , do_normalize=__UpperCAmelCase , return_attention_mask=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = WavaVecaProcessor(feature_extractor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
processor.save_pretrained(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = WavaVecaForCTC(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = WavaVecaForPreTraining(__UpperCAmelCase )
if is_finetuned or is_seq_class:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"""data""": """/""".join(dict_path.split("""/""" )[:-1] )} )
else:
__SCREAMING_SNAKE_CASE = argparse.Namespace(task="""audio_pretraining""" )
__SCREAMING_SNAKE_CASE = fairseq.tasks.setup_task(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] , task=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = model[0].eval()
recursively_load_weights(__UpperCAmelCase , __UpperCAmelCase , not is_finetuned )
hf_wavavec.save_pretrained(__UpperCAmelCase )
if __name__ == "__main__":
a = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
parser.add_argument(
"--not_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
)
parser.add_argument(
"--is_seq_class",
action="store_true",
help="Whether the model to convert is a fine-tuned sequence classification model or not",
)
a = parser.parse_args()
a = not args.not_finetuned and not args.is_seq_class
convert_wavaveca_checkpoint(
args.checkpoint_path,
args.pytorch_dump_folder_path,
args.config_path,
args.dict_path,
is_finetuned,
args.is_seq_class,
)
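# Example launches (hypothetical paths) for the converter above:
#   python convert_wav2vec2.py --checkpoint_path wav2vec_small.pt --pytorch_dump_folder_path ./w2v2-base --not_finetuned
#   python convert_wav2vec2.py --checkpoint_path wav2vec_small_960h.pt --dict_path dict.ltr.txt --pytorch_dump_folder_path ./w2v2-base-960h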
| 713 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase , params=__UpperCAmelCase ).content , """html.parser""" )
__SCREAMING_SNAKE_CASE = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
__SCREAMING_SNAKE_CASE = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
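    # anchors[2] in the "gs_fl" footer row is typically the "Cited by N" link, so the returned
    # text is the citation-count string (an assumption about Google Scholar's current markup).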
if __name__ == "__main__":
a = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 13 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
a = {
"configuration_transfo_xl": ["TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP", "TransfoXLConfig"],
"tokenization_transfo_xl": ["TransfoXLCorpus", "TransfoXLTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"AdaptiveEmbedding",
"TransfoXLForSequenceClassification",
"TransfoXLLMHeadModel",
"TransfoXLModel",
"TransfoXLPreTrainedModel",
"load_tf_weights_in_transfo_xl",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFAdaptiveEmbedding",
"TFTransfoXLForSequenceClassification",
"TFTransfoXLLMHeadModel",
"TFTransfoXLMainLayer",
"TFTransfoXLModel",
"TFTransfoXLPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_transfo_xl import TRANSFO_XL_PRETRAINED_CONFIG_ARCHIVE_MAP, TransfoXLConfig
from .tokenization_transfo_xl import TransfoXLCorpus, TransfoXLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_transfo_xl import (
TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
AdaptiveEmbedding,
TransfoXLForSequenceClassification,
TransfoXLLMHeadModel,
TransfoXLModel,
TransfoXLPreTrainedModel,
load_tf_weights_in_transfo_xl,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_transfo_xl import (
TF_TRANSFO_XL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAdaptiveEmbedding,
TFTransfoXLForSequenceClassification,
TFTransfoXLLMHeadModel,
TFTransfoXLMainLayer,
TFTransfoXLModel,
TFTransfoXLPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
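# Behavior sketch for the lazy module above: `import <package>` stays cheap because only the
# import structure is registered; attribute access (e.g. the TransfoXLModel symbol) triggers
# the deferred import of the modeling code and its torch/TF dependency on first use.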
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'camembert'
def __init__( self : int ,lowerCamelCase : List[Any]=3_0522 ,lowerCamelCase : List[Any]=768 ,lowerCamelCase : str=12 ,lowerCamelCase : List[str]=12 ,lowerCamelCase : Optional[Any]=3072 ,lowerCamelCase : Tuple="gelu" ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=512 ,lowerCamelCase : Dict=2 ,lowerCamelCase : Tuple=0.02 ,lowerCamelCase : List[Any]=1E-1_2 ,lowerCamelCase : Union[str, Any]=1 ,lowerCamelCase : Optional[Any]=0 ,lowerCamelCase : List[Any]=2 ,lowerCamelCase : List[str]="absolute" ,lowerCamelCase : int=True ,lowerCamelCase : Any=None ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class __a ( _snake_case ):
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
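# For the default task, the property above yields dynamic batch/sequence axes, i.e. roughly
# OrderedDict([("input_ids", {0: "batch", 1: "sequence"}),
#              ("attention_mask", {0: "batch", 1: "sequence"})]).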
| 13 | 0 |
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers.testing_utils import require_vision
from transformers.utils import is_vision_available
if is_vision_available():
from PIL import Image
from transformers import (
AutoProcessor,
BertTokenizerFast,
BlipImageProcessor,
GPTaTokenizer,
InstructBlipProcessor,
PreTrainedTokenizerFast,
)
@require_vision
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
__SCREAMING_SNAKE_CASE = BlipImageProcessor()
__SCREAMING_SNAKE_CASE = GPTaTokenizer.from_pretrained("""hf-internal-testing/tiny-random-GPT2Model""" )
__SCREAMING_SNAKE_CASE = BertTokenizerFast.from_pretrained("""hf-internal-testing/tiny-random-bert""" )
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).tokenizer
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).image_processor
def UpperCAmelCase__ ( self : List[Any] ,**lowerCamelCase : Dict ):
'''simple docstring'''
return AutoProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase ).qformer_tokenizer
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() ,qformer_tokenizer=self.get_qformer_tokenizer() ,)
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCamelCase ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE = InstructBlipProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=lowerCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
self.assertIsInstance(processor.qformer_tokenizer ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase ,return_token_type_ids=lowerCamelCase )
__SCREAMING_SNAKE_CASE = qformer_tokenizer(lowerCamelCase ,return_token_type_ids=lowerCamelCase )
for key in encoded_tokens.keys():
self.assertListEqual(encoded_tokens[key] ,encoded_processor[key] )
for key in encoded_tokens_qformer.keys():
self.assertListEqual(encoded_tokens_qformer[key] ,encoded_processor["""qformer_""" + key] )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] ,)
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_qformer_tokenizer()
__SCREAMING_SNAKE_CASE = InstructBlipProcessor(
tokenizer=lowerCamelCase ,image_processor=lowerCamelCase ,qformer_tokenizer=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(
list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """qformer_input_ids""", """qformer_attention_mask""", """pixel_values"""] ,)
| 715 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __a ( unittest.TestCase ):
def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
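        # With the defaults above: (30 // 2) ** 2 = 225 patches, so seq_length is 226.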
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
        __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
@jax.jit
def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ):
return model(pixel_values=lowerCamelCase ,**lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase )
| 13 | 0 |
'''simple docstring'''
import os
import string
import sys
a = 1 << 8
a = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
KEYMAP["arrow_begin"] = KEYMAP["up"]
KEYMAP["arrow_end"] = KEYMAP["left"]
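# Arrow keys are kept distinct from plain ASCII by the high flag bit: e.g.
# KEYMAP["up"] == ord("A") + (1 << 8) == 321, so an escape sequence ending in "A" is
# reported as the "up" code rather than the letter "A".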
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
a = ord(str(i))
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE : Union[str, Any] = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__UpperCAmelCase ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE : Dict = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE : Optional[Any] = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE : int = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__UpperCAmelCase )
if ord(__UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE : Optional[Any] = chr(KEYMAP["""esc"""] )
except KeyError:
__SCREAMING_SNAKE_CASE : List[Any] = cha[1]
else:
__SCREAMING_SNAKE_CASE : Dict = ch.decode(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE : Tuple = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE : Tuple = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE : int = termios.tcgetattr(__UpperCAmelCase )
try:
tty.setraw(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Optional[int] = sys.stdin.read(1 )
finally:
termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase )
return ch
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Dict = get_raw_chars()
if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__UpperCAmelCase ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE : Tuple = get_raw_chars()
if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE : int = get_raw_chars()
if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 716 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'sew'
def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = squeeze_factor
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
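# Worked example for the property above: the default conv_stride
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) multiplies out to 5 * 2**6 = 320, i.e. the feature
# encoder emits one frame per 320 raw audio samples.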
| 13 | 0 |
'''simple docstring'''
import uuid
from typing import Any, Dict, List, Optional, Union
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
a = logging.get_logger(__name__)
class __a :
def __init__( self : int ,lowerCamelCase : str = None ,lowerCamelCase : uuid.UUID = None ,lowerCamelCase : Optional[int]=None ,lowerCamelCase : Union[str, Any]=None ):
'''simple docstring'''
if not conversation_id:
__SCREAMING_SNAKE_CASE = uuid.uuida()
if past_user_inputs is None:
__SCREAMING_SNAKE_CASE = []
if generated_responses is None:
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = conversation_id
__SCREAMING_SNAKE_CASE = past_user_inputs
__SCREAMING_SNAKE_CASE = generated_responses
__SCREAMING_SNAKE_CASE = text
def __eq__( self : str ,lowerCamelCase : Tuple ):
'''simple docstring'''
if not isinstance(lowerCamelCase ,lowerCamelCase ):
return False
if self.uuid == other.uuid:
return True
return (
self.new_user_input == other.new_user_input
and self.past_user_inputs == other.past_user_inputs
and self.generated_responses == other.generated_responses
)
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ,lowerCamelCase : bool = False ):
'''simple docstring'''
if self.new_user_input:
if overwrite:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" was overwritten """
f"""with: \"{text}\".""" )
__SCREAMING_SNAKE_CASE = text
else:
logger.warning(
f"""User input added while unprocessed input was existing: \"{self.new_user_input}\" new input """
f"""ignored: \"{text}\". Set `overwrite` to True to overwrite unprocessed user input""" )
else:
__SCREAMING_SNAKE_CASE = text
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if self.new_user_input:
self.past_user_inputs.append(self.new_user_input )
__SCREAMING_SNAKE_CASE = None
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : str ):
'''simple docstring'''
self.generated_responses.append(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
for user_input, generated_response in zip(self.past_user_inputs ,self.generated_responses ):
yield True, user_input
yield False, generated_response
if self.new_user_input:
yield True, self.new_user_input
def __repr__( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""Conversation id: {self.uuid} \n"""
for is_user, text in self.iter_texts():
__SCREAMING_SNAKE_CASE = """user""" if is_user else """bot"""
output += f"""{name} >> {text} \n"""
return output
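# Turn lifecycle for the container above: a new user message sits in new_user_input until the
# pipeline marks it processed (moving it into past_user_inputs); the generated reply is then
# appended to generated_responses, and __repr__ interleaves the two lists as
# "user >>" / "bot >>" lines.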
@add_end_docstrings(
_snake_case, R'\n min_length_for_response (`int`, *optional*, defaults to 32):\n The minimum length (in number of tokens) for a response.\n minimum_tokens (`int`, *optional*, defaults to 10):\n The minimum length of tokens to leave for a response.\n ', )
class __a ( _snake_case ):
def __init__( self : str ,*lowerCamelCase : Tuple ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(*lowerCamelCase ,**lowerCamelCase )
if self.tokenizer.pad_token_id is None:
__SCREAMING_SNAKE_CASE = self.tokenizer.eos_token
def UpperCAmelCase__ ( self : List[str] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Tuple=None ,lowerCamelCase : Union[str, Any]=None ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = {}
if min_length_for_response is not None:
__SCREAMING_SNAKE_CASE = min_length_for_response
if minimum_tokens is not None:
__SCREAMING_SNAKE_CASE = minimum_tokens
if "max_length" in generate_kwargs:
__SCREAMING_SNAKE_CASE = generate_kwargs["""max_length"""]
# self.max_length = generate_kwargs.get("max_length", self.model.config.max_length)
if clean_up_tokenization_spaces is not None:
__SCREAMING_SNAKE_CASE = clean_up_tokenization_spaces
if generate_kwargs:
forward_params.update(lowerCamelCase )
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[Any] ,lowerCamelCase : Union[Conversation, List[Conversation]] ,lowerCamelCase : Tuple=0 ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = super().__call__(lowerCamelCase ,num_workers=lowerCamelCase ,**lowerCamelCase )
if isinstance(lowerCamelCase ,lowerCamelCase ) and len(lowerCamelCase ) == 1:
return outputs[0]
return outputs
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Conversation ,lowerCamelCase : Optional[int]=32 ):
'''simple docstring'''
if not isinstance(lowerCamelCase ,lowerCamelCase ):
raise ValueError("""ConversationalPipeline, expects Conversation as inputs""" )
if conversation.new_user_input is None:
raise ValueError(
f"""Conversation with UUID {type(conversation.uuid )} does not contain new user input to process. """
"""Add user inputs with the conversation's `add_user_input` method""" )
if hasattr(self.tokenizer ,"""_build_conversation_input_ids""" ):
__SCREAMING_SNAKE_CASE = self.tokenizer._build_conversation_input_ids(lowerCamelCase )
else:
# If the tokenizer cannot handle conversations, we default to only the old version
__SCREAMING_SNAKE_CASE = self._legacy_parse_and_tokenize(lowerCamelCase )
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = torch.LongTensor([input_ids] )
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = tf.constant([input_ids] )
return {"input_ids": input_ids, "conversation": conversation}
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ,lowerCamelCase : Tuple=10 ,**lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = generate_kwargs.get("""max_length""" ,self.model.config.max_length )
__SCREAMING_SNAKE_CASE = model_inputs["""input_ids"""].shape[1]
if max_length - minimum_tokens < n:
logger.warning(f"""Conversation input is to long ({n}), trimming it to ({max_length} - {minimum_tokens})""" )
__SCREAMING_SNAKE_CASE = max_length - minimum_tokens
__SCREAMING_SNAKE_CASE = model_inputs["""input_ids"""][:, -trim:]
if "attention_mask" in model_inputs:
__SCREAMING_SNAKE_CASE = model_inputs["""attention_mask"""][:, -trim:]
__SCREAMING_SNAKE_CASE = model_inputs.pop("""conversation""" )
__SCREAMING_SNAKE_CASE = max_length
__SCREAMING_SNAKE_CASE = self.model.generate(**lowerCamelCase ,**lowerCamelCase )
if self.model.config.is_encoder_decoder:
__SCREAMING_SNAKE_CASE = 1
else:
__SCREAMING_SNAKE_CASE = n
return {"output_ids": output_ids[:, start_position:], "conversation": conversation}
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : str ,lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_outputs["""output_ids"""]
__SCREAMING_SNAKE_CASE = self.tokenizer.decode(
output_ids[0] ,skip_special_tokens=lowerCamelCase ,clean_up_tokenization_spaces=lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = model_outputs["""conversation"""]
conversation.mark_processed()
conversation.append_response(lowerCamelCase )
return conversation
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Conversation ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.tokenizer.eos_token_id
__SCREAMING_SNAKE_CASE = []
for is_user, text in conversation.iter_texts():
if eos_token_id is not None:
input_ids.extend(self.tokenizer.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase ) + [eos_token_id] )
else:
input_ids.extend(self.tokenizer.encode(lowerCamelCase ,add_special_tokens=lowerCamelCase ) )
if len(lowerCamelCase ) > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE = input_ids[-self.tokenizer.model_max_length :]
return input_ids
| 717 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase = 1 , __UpperCAmelCase = 1000 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 0
for divide_by_number in range(__UpperCAmelCase , digit + 1 ):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = divide_by_number
else:
has_been_divided.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = now_divide * 10 % divide_by_number
return the_digit
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
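# Worked check (Project Euler 26): with the defaults the function returns 983, since 1/983 has
# the longest recurring decimal cycle for d < 1000; with digit=10 it returns 7
# (1/7 = 0.(142857), a 6-digit cycle).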
| 13 | 0 |
'''simple docstring'''
from functools import reduce
a = (
"73167176531330624919225119674426574742355349194934"
"96983520312774506326239578318016984801869478851843"
"85861560789112949495459501737958331952853208805511"
"12540698747158523863050715693290963295227443043557"
"66896648950445244523161731856403098711121722383113"
"62229893423380308135336276614282806444486645238749"
"30358907296290491560440772390713810515859307960866"
"70172427121883998797908792274921901699720888093776"
"65727333001053367881220235421809751254540594752243"
"52584907711670556013604839586446706324415722155397"
"53697817977846174064955149290862569321978468622482"
"83972241375657056057490261407972968652414535100474"
"82166370484403199890008895243450658541227588666881"
"16427171479924442928230863465674813919123162824586"
"17866458359124566529476545682848912883142607690042"
"24219022671055626321111109370544217506941658960408"
"07198403850962455444362981230987879927244284909188"
"84580156166097919133875499200524063689912560717606"
"05886116467109405077541002256983155200055935729725"
"71636269561882670428252483600823257530420752963450"
)
def solution(n: str = N) -> int:
    '''Return the greatest product of thirteen adjacent digits in `n`.

    >>> solution()
    23514624000
    '''
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
| 718 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __a ( unittest.TestCase ):
def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224}
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : str=False ,lowerCamelCase : str=False ):
'''simple docstring'''
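        # Build random test inputs as PIL images, NumPy arrays, or torch tensors, depending on the flags.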
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
                        255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uint8 ) )
else:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 )
                image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uint8 ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
if torchify:
__SCREAMING_SNAKE_CASE = [torch.from_numpy(lowerCamelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
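        # Shared tester configuration; the second test class below exercises four-channel (RGBA) inputs.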
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 3
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 13 | 0 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'sew'
def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
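        # Most arguments mirror Wav2Vec2Config; `squeeze_factor` is the SEW-specific time-axis downsampling factor.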
super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = squeeze_factor
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
| 719 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def get_duration(func):
    '''Decorator that makes `func` return its wall-clock run time instead of its result.'''

    def wrapper(*args, **kwargs):
        starttime = timeit.default_timer()
        func(*args, **kwargs)  # run once, discarding the result
        delta = timeit.default_timer() - starttime
        return delta

    wrapper.__name__ = func.__name__
    return wrapper


def generate_examples(features, num_examples=100, seq_shapes=None):
    '''Create `num_examples` random records matching the given `datasets.Features`.'''
    dummy_data = []
    seq_shapes = seq_shapes or {}
    for i in range(num_examples):
        example = {}
        for col_id, (k, v) in enumerate(features.items()):
            if isinstance(v, _ArrayXD):
                data = np.random.rand(*v.shape).astype(v.dtype)
            elif isinstance(v, datasets.Value):
                if v.dtype == "string":
                    data = "The small grey turtle was surprisingly fast when challenged."
                else:
                    data = np.random.randint(10, size=1).astype(v.dtype).item()
            elif isinstance(v, datasets.Sequence):
                while isinstance(v, datasets.Sequence):
                    v = v.feature
                shape = seq_shapes[k]
                data = np.random.rand(*shape).astype(v.dtype)
            example[k] = data

        dummy_data.append((i, example))

    return dummy_data


def generate_example_dataset(dataset_path, features, num_examples=100, seq_shapes=None):
    '''Write random examples to an Arrow file and load them back as a Dataset.'''
    dummy_data = generate_examples(features, num_examples=num_examples, seq_shapes=seq_shapes)

    with ArrowWriter(features=features, path=dataset_path) as writer:
        for key, record in dummy_data:
            example = features.encode_example(record)
            writer.write(example)
        num_final_examples, num_bytes = writer.finalize()

    if not num_final_examples == num_examples:
        raise ValueError(
            f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )

    dataset = datasets.Dataset.from_file(filename=dataset_path, info=datasets.DatasetInfo(features=features))
    return dataset
| 13 | 0 |
'''simple docstring'''
from ....utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
    def __init__(self, config, num_labels=None, modal_hidden_size=2048):
        '''Wrap an existing text config, copying its attributes and adding multimodal fields.'''
        self.__dict__ = config.__dict__
        self.modal_hidden_size = modal_hidden_size
        if num_labels:
            self.num_labels = num_labels
| 720 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a = "__DUMMY_TRANSFORMERS_USER__"
a = "Dummy User"
a = "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"
a = "https://hub-ci.huggingface.co"
a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
a = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
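    # Point huggingface_hub file downloads at the CI Hub endpoint instead of the production Hub.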
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(__UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=__UpperCAmelCase )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfFolder.get_token()
HfFolder.save_token(__UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
def _cleanup_repo(__UpperCAmelCase ):
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
@contextmanager
def _temporary_repo(__UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(__UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
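    # Create a throwaway private dataset repo seeded with one text file, and delete it afterwards.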
__SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 13 | 0 |
'''simple docstring'''
import torch
def main() -> None:
    '''Report how many CUDA devices are visible to this process.'''
    if torch.cuda.is_available():
        num_gpus = torch.cuda.device_count()
    else:
        num_gpus = 0
    print(f"""Successfully ran on {num_gpus} GPUs""" )
if __name__ == "__main__":
main()
| 721 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Dict = 'deta'
__UpperCamelCase : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,):
'''simple docstring'''
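        # The backbone may be given as a config object or as a plain dict keyed by `model_type`.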
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase )
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
# deformable attributes
__SCREAMING_SNAKE_CASE = num_feature_levels
__SCREAMING_SNAKE_CASE = encoder_n_points
__SCREAMING_SNAKE_CASE = decoder_n_points
__SCREAMING_SNAKE_CASE = two_stage
__SCREAMING_SNAKE_CASE = two_stage_num_proposals
__SCREAMING_SNAKE_CASE = with_box_refine
__SCREAMING_SNAKE_CASE = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 13 | 0 |
import multiprocessing
import os
from typing import BinaryIO, Optional, Union
import fsspec
from .. import Dataset, Features, NamedSplit, config
from ..formatting import query_table
from ..packaged_modules.json.json import Json
from ..utils import logging
from ..utils.typing import NestedDataStructureLike, PathLike
from .abc import AbstractDatasetReader
class __a ( _snake_case ):
def __init__( self : List[Any] ,lowerCamelCase : NestedDataStructureLike[PathLike] ,lowerCamelCase : Optional[NamedSplit] = None ,lowerCamelCase : Optional[Features] = None ,lowerCamelCase : str = None ,lowerCamelCase : bool = False ,lowerCamelCase : bool = False ,lowerCamelCase : Optional[str] = None ,lowerCamelCase : Optional[int] = None ,**lowerCamelCase : Optional[int] ,):
'''simple docstring'''
super().__init__(
lowerCamelCase ,split=lowerCamelCase ,features=lowerCamelCase ,cache_dir=lowerCamelCase ,keep_in_memory=lowerCamelCase ,streaming=lowerCamelCase ,num_proc=lowerCamelCase ,**lowerCamelCase ,)
__SCREAMING_SNAKE_CASE = field
__SCREAMING_SNAKE_CASE = path_or_paths if isinstance(lowerCamelCase ,lowerCamelCase ) else {self.split: path_or_paths}
__SCREAMING_SNAKE_CASE = Json(
cache_dir=lowerCamelCase ,data_files=lowerCamelCase ,features=lowerCamelCase ,field=lowerCamelCase ,**lowerCamelCase ,)
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
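        # Streaming yields an IterableDataset; otherwise the builder downloads, prepares, and loads a map-style dataset.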
if self.streaming:
__SCREAMING_SNAKE_CASE = self.builder.as_streaming_dataset(split=self.split )
# Build regular (map-style) dataset
else:
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = None
self.builder.download_and_prepare(
download_config=lowerCamelCase ,download_mode=lowerCamelCase ,verification_mode=lowerCamelCase ,base_path=lowerCamelCase ,num_proc=self.num_proc ,)
__SCREAMING_SNAKE_CASE = self.builder.as_dataset(
split=self.split ,verification_mode=lowerCamelCase ,in_memory=self.keep_in_memory )
return dataset
class __a :
def __init__( self : Tuple ,lowerCamelCase : Dataset ,lowerCamelCase : Union[PathLike, BinaryIO] ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Optional[int] = None ,**lowerCamelCase : str ,):
'''simple docstring'''
if num_proc is not None and num_proc <= 0:
raise ValueError(f"""num_proc {num_proc} must be an integer > 0.""" )
__SCREAMING_SNAKE_CASE = dataset
__SCREAMING_SNAKE_CASE = path_or_buf
__SCREAMING_SNAKE_CASE = batch_size if batch_size else config.DEFAULT_MAX_BATCH_SIZE
__SCREAMING_SNAKE_CASE = num_proc
__SCREAMING_SNAKE_CASE = """utf-8"""
__SCREAMING_SNAKE_CASE = to_json_kwargs
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
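        # Default to JSON Lines when orient is "records", mirroring pandas' to_json semantics.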
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""path_or_buf""" ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""orient""" ,"""records""" )
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""lines""" ,True if orient == """records""" else False )
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""index""" ,False if orient in ["""split""", """table"""] else True )
__SCREAMING_SNAKE_CASE = self.to_json_kwargs.pop("""compression""" ,lowerCamelCase )
if compression not in [None, "infer", "gzip", "bz2", "xz"]:
raise NotImplementedError(f"""`datasets` currently does not support {compression} compression""" )
if isinstance(self.path_or_buf ,(str, bytes, os.PathLike) ):
with fsspec.open(self.path_or_buf ,"""wb""" ,compression=lowerCamelCase ) as buffer:
__SCREAMING_SNAKE_CASE = self._write(file_obj=lowerCamelCase ,orient=lowerCamelCase ,lines=lowerCamelCase ,index=lowerCamelCase ,**self.to_json_kwargs )
else:
if compression:
raise NotImplementedError(
f"""The compression parameter is not supported when writing to a buffer, but compression={compression}"""
""" was passed. Please provide a local path instead.""" )
__SCREAMING_SNAKE_CASE = self._write(
file_obj=self.path_or_buf ,orient=lowerCamelCase ,lines=lowerCamelCase ,index=lowerCamelCase ,**self.to_json_kwargs )
return written
def UpperCAmelCase__ ( self : Optional[Any] ,lowerCamelCase : str ):
'''simple docstring'''
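        # Serialize one slice of the underlying Arrow table to JSON via pandas.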
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = args
__SCREAMING_SNAKE_CASE = query_table(
table=self.dataset.data ,key=slice(lowerCamelCase ,offset + self.batch_size ) ,indices=self.dataset._indices ,)
__SCREAMING_SNAKE_CASE = batch.to_pandas().to_json(
path_or_buf=lowerCamelCase ,orient=lowerCamelCase ,lines=lowerCamelCase ,index=lowerCamelCase ,**lowerCamelCase )
if not json_str.endswith("""\n""" ):
json_str += "\n"
return json_str.encode(self.encoding )
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : BinaryIO ,lowerCamelCase : Optional[int] ,lowerCamelCase : Any ,lowerCamelCase : Union[str, Any] ,**lowerCamelCase : Any ,):
'''simple docstring'''
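        # Single-process mode streams batches sequentially; with num_proc set, a Pool serializes batches in parallel.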
__SCREAMING_SNAKE_CASE = 0
if self.num_proc is None or self.num_proc == 1:
for offset in logging.tqdm(
range(0 ,len(self.dataset ) ,self.batch_size ) ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating json from Arrow format""" ,):
__SCREAMING_SNAKE_CASE = self._batch_json((offset, orient, lines, index, to_json_kwargs) )
written += file_obj.write(lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = len(self.dataset ), self.batch_size
with multiprocessing.Pool(self.num_proc ) as pool:
for json_str in logging.tqdm(
pool.imap(
self._batch_json ,[(offset, orient, lines, index, to_json_kwargs) for offset in range(0 ,lowerCamelCase ,lowerCamelCase )] ,) ,total=(num_rows // batch_size) + 1 if num_rows % batch_size else num_rows // batch_size ,unit="""ba""" ,disable=not logging.is_progress_bar_enabled() ,desc="""Creating json from Arrow format""" ,):
written += file_obj.write(lowerCamelCase )
return written
| 700 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class FlaxUpsample2D(nn.Module):
    # Nearest-neighbor 2x upsampling followed by a 3x3 convolution.
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        batch, height, width, channels = hidden_states.shape
        hidden_states = jax.image.resize(
            hidden_states,
            shape=(batch, height * 2, width * 2, channels),
            method="nearest",
        )
        return self.conv(hidden_states)


class FlaxDownsample2D(nn.Module):
    # Strided 3x3 convolution that halves the spatial resolution.
    out_channels: int
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        self.conv = nn.Conv(
            self.out_channels,
            kernel_size=(3, 3),
            strides=(2, 2),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

    def __call__(self, hidden_states):
        return self.conv(hidden_states)


class FlaxResnetBlock2D(nn.Module):
    # ResNet block with a timestep-embedding projection, as used in diffusion UNets.
    in_channels: int
    out_channels: int = None
    dropout_prob: float = 0.0
    use_nin_shortcut: bool = None
    dtype: jnp.dtype = jnp.float32

    def setup(self):
        out_channels = self.in_channels if self.out_channels is None else self.out_channels

        self.norm1 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.conv1 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        self.time_emb_proj = nn.Dense(out_channels, dtype=self.dtype)

        self.norm2 = nn.GroupNorm(num_groups=32, epsilon=1e-5)
        self.dropout = nn.Dropout(self.dropout_prob)
        self.conv2 = nn.Conv(
            out_channels,
            kernel_size=(3, 3),
            strides=(1, 1),
            padding=((1, 1), (1, 1)),
            dtype=self.dtype,
        )

        use_nin_shortcut = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut

        self.conv_shortcut = None
        if use_nin_shortcut:
            # 1x1 convolution to match channel counts on the residual path.
            self.conv_shortcut = nn.Conv(
                out_channels,
                kernel_size=(1, 1),
                strides=(1, 1),
                padding="VALID",
                dtype=self.dtype,
            )

    def __call__(self, hidden_states, temb, deterministic=True):
        residual = hidden_states
        hidden_states = self.norm1(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.conv1(hidden_states)

        # Project the timestep embedding and broadcast it over the spatial dims.
        temb = self.time_emb_proj(nn.swish(temb))
        temb = jnp.expand_dims(jnp.expand_dims(temb, 1), 1)
        hidden_states = hidden_states + temb

        hidden_states = self.norm2(hidden_states)
        hidden_states = nn.swish(hidden_states)
        hidden_states = self.dropout(hidden_states, deterministic)
        hidden_states = self.conv2(hidden_states)

        if self.conv_shortcut is not None:
            residual = self.conv_shortcut(residual)
        return hidden_states + residual
| 13 | 0 |
'''simple docstring'''
from __future__ import annotations
import typing
from collections.abc import Iterable
import numpy as np
Vector = typing.Union[Iterable[float], Iterable[int], np.ndarray]  # noqa: UP007
VectorOut = typing.Union[np.float64, int, float]  # noqa: UP007
def euclidean_distance(point_a: Vector, point_b: Vector) -> VectorOut:
    '''Euclidean distance computed with NumPy.'''
    return np.sqrt(np.sum((np.asarray(point_a) - np.asarray(point_b)) ** 2))


def euclidean_distance_no_np(point_a: Vector, point_b: Vector) -> VectorOut:
    '''Euclidean distance computed with plain Python, no NumPy.'''
    return sum((va - vb) ** 2 for va, vb in zip(point_a, point_b)) ** (1 / 2)
if __name__ == "__main__":
    def benchmark() -> None:
'''simple docstring'''
from timeit import timeit
print("""Without Numpy""" )
print(
timeit(
"""euclidean_distance_no_np([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
print("""With Numpy""" )
print(
timeit(
"""euclidean_distance([1, 2, 3], [4, 5, 6])""" , number=10000 , globals=globals() , ) )
benchmark()
| 701 |
'''simple docstring'''
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        '''Sift the value at `start` down until the min-heap property is restored.'''
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp1 = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp1

            temp = self.get_position(positions[smallest_child])
            self.set_position(
                positions[smallest_child], self.get_position(positions[start])
            )
            self.set_position(positions[start], temp)

            self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        '''Sift a decreased key at `index` up toward the root.'''
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp


def prims_algorithm(adjacency_list):
    '''Return the edges of a minimum spanning tree using Prim's algorithm.'''
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
a = int(input("Enter number of edges: ").strip())
a = defaultdict(list)
for _ in range(edges_number):
a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13 | 0 |
'''simple docstring'''
import fire
from torch.utils.data import DataLoader
from tqdm import tqdm
from transformers import AutoTokenizer
from utils import SeqaSeqDataset, pickle_save
def save_len_file(
    tokenizer_name, data_dir, max_source_length=1024, max_target_length=1024, consider_target=False, **kwargs
):
    '''Save max(src_len, tgt_len) for each train/val example so batches can be length-grouped.'''
    tok = AutoTokenizer.from_pretrained(tokenizer_name)
    train_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="train", **kwargs)
    pad = tok.pad_token_id

    def get_lens(ds):
        dl = tqdm(
            DataLoader(ds, batch_size=512, num_workers=8, shuffle=False, collate_fn=ds.collate_fn),
            desc=str(ds.len_file),
        )
        max_lens = []
        for batch in dl:
            src_lens = batch["input_ids"].ne(pad).sum(1).tolist()
            tgt_lens = batch["labels"].ne(pad).sum(1).tolist()
            if consider_target:
                for src, tgt in zip(src_lens, tgt_lens):
                    max_lens.append(max(src, tgt))
            else:
                max_lens.extend(src_lens)
        return max_lens

    train_lens = get_lens(train_ds)
    val_ds = SeqaSeqDataset(tok, data_dir, max_source_length, max_target_length, type_path="val", **kwargs)
    val_lens = get_lens(val_ds)
    pickle_save(train_lens, train_ds.len_file)
    pickle_save(val_lens, val_ds.len_file)
if __name__ == "__main__":
fire.Fire(save_len_file)
| 702 |
'''simple docstring'''
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a = KEYMAP["up"]
a = KEYMAP["left"]
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars():
    '''Get raw characters from stdin, handling Windows multi-byte key sequences.'''
    if os.name == "nt":
        import msvcrt

        encoding = "mbcs"
        # Flush the keyboard buffer
        while msvcrt.kbhit():
            msvcrt.getch()
        if len(WIN_CH_BUFFER) == 0:
            # Read the keystroke
            ch = msvcrt.getch()
            # If it is a prefix char, get second part
            if ch in (b"\x00", b"\xe0"):
                ch2 = ch + msvcrt.getch()
                # Translate actual Win chars to bullet char types
                try:
                    chx = chr(WIN_KEYMAP[ch2])
                    WIN_CH_BUFFER.append(chr(KEYMAP["mod_int"]))
                    WIN_CH_BUFFER.append(chx)
                    if ord(chx) in (
                        KEYMAP["insert"] - 1 << 9,
                        KEYMAP["delete"] - 1 << 9,
                        KEYMAP["pg_up"] - 1 << 9,
                        KEYMAP["pg_down"] - 1 << 9,
                    ):
                        WIN_CH_BUFFER.append(chr(126))
                    ch = chr(KEYMAP["esc"])
                except KeyError:
                    ch = ch2[1]
            else:
                ch = ch.decode(encoding)
        else:
            ch = WIN_CH_BUFFER.pop(0)
    elif os.name == "posix":
        import termios
        import tty

        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd)
        try:
            tty.setraw(fd)
            ch = sys.stdin.read(1)
        finally:
            termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
    return ch
def get_character():
    '''Get a character from the keyboard, translating escape sequences to key codes.'''
    char = get_raw_chars()
    if ord(char) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
        return char

    elif ord(char) == KEYMAP["esc"]:
        combo = get_raw_chars()
        if ord(combo) == KEYMAP["mod_int"]:
            key = get_raw_chars()
            if ord(key) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(key) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
                return chr(ord(key) + ARROW_KEY_FLAG)
            else:
                return KEYMAP["undefined"]
        else:
            return get_raw_chars()

    else:
        if char in string.printable:
            return char
        else:
            return KEYMAP["undefined"]
| 13 | 0 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def TFKMeansCluster(vectors, noofclusters):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
assert noofclusters < len(__UpperCAmelCase )
# Find out the dimensionality
__SCREAMING_SNAKE_CASE = len(vectors[0] )
# Will help select random centroids from among the available vectors
__SCREAMING_SNAKE_CASE = list(range(len(__UpperCAmelCase ) ) )
shuffle(__UpperCAmelCase )
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
__SCREAMING_SNAKE_CASE = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
__SCREAMING_SNAKE_CASE = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
__SCREAMING_SNAKE_CASE = [
tf.Variable(vectors[vector_indices[i]] ) for i in range(__UpperCAmelCase )
]
##These nodes will assign the centroid Variables the appropriate
##values
__SCREAMING_SNAKE_CASE = tf.placeholder("""float64""" , [dim] )
__SCREAMING_SNAKE_CASE = []
for centroid in centroids:
cent_assigns.append(tf.assign(__UpperCAmelCase , __UpperCAmelCase ) )
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
__SCREAMING_SNAKE_CASE = [tf.Variable(0 ) for i in range(len(__UpperCAmelCase ) )]
##These nodes will assign an assignment Variable the appropriate
##value
__SCREAMING_SNAKE_CASE = tf.placeholder("""int32""" )
__SCREAMING_SNAKE_CASE = []
for assignment in assignments:
cluster_assigns.append(tf.assign(__UpperCAmelCase , __UpperCAmelCase ) )
##Now lets construct the node that will compute the mean
# The placeholder for the input
__SCREAMING_SNAKE_CASE = tf.placeholder("""float""" , [None, dim] )
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
__SCREAMING_SNAKE_CASE = tf.reduce_mean(__UpperCAmelCase , 0 )
##Node for computing Euclidean distances
# Placeholders for input
__SCREAMING_SNAKE_CASE = tf.placeholder("""float""" , [dim] )
__SCREAMING_SNAKE_CASE = tf.placeholder("""float""" , [dim] )
__SCREAMING_SNAKE_CASE = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(__UpperCAmelCase , __UpperCAmelCase ) , 2 ) ) )
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
__SCREAMING_SNAKE_CASE = tf.placeholder("""float""" , [noofclusters] )
__SCREAMING_SNAKE_CASE = tf.argmin(__UpperCAmelCase , 0 )
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
__SCREAMING_SNAKE_CASE = tf.initialize_all_variables()
# Initialize all variables
sess.run(__UpperCAmelCase )
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
__SCREAMING_SNAKE_CASE = 100
for _ in range(__UpperCAmelCase ):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
__SCREAMING_SNAKE_CASE = [
sess.run(__UpperCAmelCase , feed_dict={va: vect, va: sess.run(__UpperCAmelCase )} )
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
__SCREAMING_SNAKE_CASE = sess.run(
__UpperCAmelCase , feed_dict={centroid_distances: distances} )
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment} )
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(__UpperCAmelCase ):
# Collect all the vectors assigned to this cluster
__SCREAMING_SNAKE_CASE = [
vectors[i]
for i in range(len(__UpperCAmelCase ) )
if sess.run(assignments[i] ) == cluster_n
]
# Compute new centroid location
__SCREAMING_SNAKE_CASE = sess.run(
__UpperCAmelCase , feed_dict={mean_input: array(__UpperCAmelCase )} )
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location} )
# Return centroids and assignments
__SCREAMING_SNAKE_CASE = sess.run(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = sess.run(__UpperCAmelCase )
return centroids, assignments
| 703 |
'''simple docstring'''
from __future__ import annotations
import bisect
def bisect_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    '''Leftmost index at which `item` can be inserted while keeping the order.

    >>> bisect_left([0, 5, 7, 10, 15], 6)
    2
    '''
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] < item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def bisect_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> int:
    '''Rightmost index at which `item` can be inserted while keeping the order.

    >>> bisect_right([0, 5, 7, 10, 15], 5)
    2
    '''
    if hi < 0:
        hi = len(sorted_collection)

    while lo < hi:
        mid = lo + (hi - lo) // 2
        if sorted_collection[mid] <= item:
            lo = mid + 1
        else:
            hi = mid

    return lo


def insort_left(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_left(sorted_collection, item, lo, hi), item)


def insort_right(sorted_collection: list[int], item: int, lo: int = 0, hi: int = -1) -> None:
    sorted_collection.insert(bisect_right(sorted_collection, item, lo, hi), item)


def binary_search(sorted_collection: list[int], item: int) -> int | None:
    '''Classic iterative binary search; returns the index of `item` or None.'''
    left = 0
    right = len(sorted_collection) - 1

    while left <= right:
        midpoint = left + (right - left) // 2
        current_item = sorted_collection[midpoint]
        if current_item == item:
            return midpoint
        elif item < current_item:
            right = midpoint - 1
        else:
            left = midpoint + 1
    return None


def binary_search_std_lib(sorted_collection: list[int], item: int) -> int | None:
    '''Binary search delegating to the standard-library bisect module.'''
    index = bisect.bisect_left(sorted_collection, item)
    if index != len(sorted_collection) and sorted_collection[index] == item:
        return index
    return None


def binary_search_by_recursion(sorted_collection: list[int], item: int, left: int, right: int) -> int | None:
    '''Recursive binary search over sorted_collection[left:right + 1].'''
    if right < left:
        return None

    midpoint = left + (right - left) // 2

    if sorted_collection[midpoint] == item:
        return midpoint
    elif sorted_collection[midpoint] > item:
        return binary_search_by_recursion(sorted_collection, item, left, midpoint - 1)
    else:
        return binary_search_by_recursion(sorted_collection, item, midpoint + 1, right)


if __name__ == "__main__":
    user_input = input("Enter numbers separated by comma:\n").strip()
    collection = sorted(int(item) for item in user_input.split(","))
    target = int(input("Enter a single number to be found in the list:\n"))
    result = binary_search(collection, target)
    if result is None:
        print(f"{target} was not found in {collection}.")
    else:
        print(f"{target} was found at position {result} in {collection}.")
| 13 | 0 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Dict = 'deta'
__UpperCamelCase : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase )
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
# deformable attributes
__SCREAMING_SNAKE_CASE = num_feature_levels
__SCREAMING_SNAKE_CASE = encoder_n_points
__SCREAMING_SNAKE_CASE = decoder_n_points
__SCREAMING_SNAKE_CASE = two_stage
__SCREAMING_SNAKE_CASE = two_stage_num_proposals
__SCREAMING_SNAKE_CASE = with_box_refine
__SCREAMING_SNAKE_CASE = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
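# The constructor above resolves a nested backbone config in three steps:
# default -> registry lookup -> from_dict. A standalone sketch of that
# pattern; the registry and config classes here are toy stand-ins, not the
# library's real ones.
CONFIG_REGISTRY = {}


class ToyConfig:
    model_type = "base"

    def __init__(self, **kwargs):
        self.__dict__.update(kwargs)

    @classmethod
    def from_dict(cls, config_dict):
        return cls(**config_dict)


class ToyResNetConfig(ToyConfig):
    model_type = "resnet"


CONFIG_REGISTRY["resnet"] = ToyResNetConfig


def resolve_backbone_config(backbone_config):
    if backbone_config is None:
        # Fall back to a default backbone, as the DETA config does.
        return CONFIG_REGISTRY["resnet"](out_features=["stage2", "stage3", "stage4"])
    if isinstance(backbone_config, dict):
        # Note: pop() mutates the caller's dict, mirroring the code above.
        model_type = backbone_config.pop("model_type")
        return CONFIG_REGISTRY[model_type].from_dict(backbone_config)
    return backbone_config  # already a config object


cfg = resolve_backbone_config({"model_type": "resnet", "depths": [3, 4, 6, 3]})
assert isinstance(cfg, ToyResNetConfig) and cfg.depths == [3, 4, 6, 3]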
| 704 |
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : int = 'linear'
__UpperCamelCase : Tuple = 'cosine'
__UpperCamelCase : Tuple = 'cosine_with_restarts'
__UpperCamelCase : List[Any] = 'polynomial'
__UpperCamelCase : Optional[Any] = 'constant'
__UpperCamelCase : Optional[int] = 'constant_with_warmup'
__UpperCamelCase : List[Any] = 'piecewise_constant'
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = float(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
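# A usage sketch for the linear warmup/decay schedule defined above, written
# with readable names. The toy model and hyperparameters are arbitrary.
import torch
from torch.optim import SGD
from torch.optim.lr_scheduler import LambdaLR

model = torch.nn.Linear(4, 2)
optimizer = SGD(model.parameters(), lr=0.1)
num_warmup_steps, num_training_steps = 10, 100


def linear_warmup_decay(current_step: int) -> float:
    if current_step < num_warmup_steps:
        return float(current_step) / float(max(1, num_warmup_steps))
    return max(
        0.0,
        float(num_training_steps - current_step)
        / float(max(1, num_training_steps - num_warmup_steps)),
    )


scheduler = LambdaLR(optimizer, linear_warmup_decay)
for _ in range(num_training_steps):
    optimizer.step()   # lr ramps 0 -> 0.1 over 10 steps, then decays to 0
    scheduler.step()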
| 13 | 0 |
'''simple docstring'''
import sys
from collections import defaultdict
class __a :
'''simple docstring'''
def __init__( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.node_position[vertex]
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pos
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ):
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
__SCREAMING_SNAKE_CASE = 2 * start + 2
if heap[smallest_child] < heap[start]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
heap[start],
positions[start],
)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa
__SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,lowerCamelCase )
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = position[index]
while index != 0:
__SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__SCREAMING_SNAKE_CASE = heap[parent]
__SCREAMING_SNAKE_CASE = position[parent]
self.set_position(position[parent] ,lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,lowerCamelCase )
break
__SCREAMING_SNAKE_CASE = parent
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,0 )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1
for i in range(lowerCamelCase ,-1 ,-1 ):
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = positions[0]
__SCREAMING_SNAKE_CASE = sys.maxsize
self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase )
return temp
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Heap()
__SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
__SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex
__SCREAMING_SNAKE_CASE = []
for vertex in range(len(__UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(__UpperCAmelCase )
heap.node_position.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = distance
heap.heapify(__UpperCAmelCase , __UpperCAmelCase )
for _ in range(1 , len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__SCREAMING_SNAKE_CASE = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__UpperCAmelCase )]
):
__SCREAMING_SNAKE_CASE = distance
heap.bottom_to_top(
__UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
a = int(input("Enter number of edges: ").strip())
a = defaultdict(list)
for _ in range(edges_number):
a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
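# The same minimum spanning tree can be computed far more compactly with the
# standard library's heapq (lazy-deletion Prim's). A sketch with my own
# names, returning the tree edges like the function above.
import heapq
from collections import defaultdict


def prim_mst(adjacency_list, start=0):
    visited = set()
    tree_edges = []
    heap = [(0, -1, start)]  # (edge weight, parent vertex, vertex)
    while heap:
        _, parent, vertex = heapq.heappop(heap)
        if vertex in visited:
            continue  # stale entry left behind by lazy deletion
        visited.add(vertex)
        if parent != -1:
            tree_edges.append((parent, vertex))
        for neighbor, distance in adjacency_list[vertex]:
            if neighbor not in visited:
                heapq.heappush(heap, (distance, vertex, neighbor))
    return tree_edges


toy_graph = defaultdict(list)
for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
    toy_graph[u].append([v, w])
    toy_graph[v].append([u, w])
assert sorted(prim_mst(toy_graph)) == [(0, 1), (1, 2)]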
| 705 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
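# A stripped-down sketch of the lazy-module idiom used above. The real
# transformers._LazyModule does more (TYPE_CHECKING support, __dir__,
# pickling); this toy version only defers submodule imports to first use.
import importlib
import types


class ToyLazyModule(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        # Map each exported attribute to the module that actually defines it.
        self._attr_to_module = {
            attr: mod for mod, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ runs only once
        return value


lazy_math = ToyLazyModule("lazy_math", {"math": ["sqrt", "pi"]})
assert lazy_math.sqrt(9) == 3.0  # "math" was imported just now, on first use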
| 13 | 0 |
'''simple docstring'''
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import OwlViTImageProcessor, OwlViTProcessor
@require_vision
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
__SCREAMING_SNAKE_CASE = ["""""", """l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
__SCREAMING_SNAKE_CASE = dict(zip(lowerCamelCase ,range(len(lowerCamelCase ) ) ) )
__SCREAMING_SNAKE_CASE = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
__SCREAMING_SNAKE_CASE = {"""unk_token""": """<unk>"""}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""merges_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write(json.dumps(lowerCamelCase ) + """\n""" )
with open(self.merges_file ,"""w""" ,encoding="""utf-8""" ) as fp:
fp.write("""\n""".join(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = {
"""do_resize""": True,
"""size""": 20,
"""do_center_crop""": True,
"""crop_size""": 18,
"""do_normalize""": True,
"""image_mean""": [0.48_145_466, 0.4_578_275, 0.40_821_073],
"""image_std""": [0.26_862_954, 0.26_130_258, 0.27_577_711],
}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,lowerCamelCase )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ,**lowerCamelCase : List[str] ):
'''simple docstring'''
return CLIPTokenizer.from_pretrained(self.tmpdirname ,pad_token="""!""" ,**lowerCamelCase )
def UpperCAmelCase__ ( self : int ,**lowerCamelCase : List[str] ):
'''simple docstring'''
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,pad_token="""!""" ,**lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ,**lowerCamelCase : Tuple ):
'''simple docstring'''
return OwlViTImageProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_rust_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
processor_slow.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(self.tmpdirname ,use_fast=lowerCamelCase )
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
processor_fast.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,lowerCamelCase )
self.assertIsInstance(processor_fast.tokenizer ,lowerCamelCase )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,lowerCamelCase )
self.assertIsInstance(processor_fast.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCamelCase )
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=lowerCamelCase )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,lowerCamelCase )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""np""" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase ,return_tensors="""np""" )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key][0].tolist() ,encoded_processor[key][0].tolist() )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """google/owlvit-base-patch32"""
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = ["""cat""", """nasa badge"""]
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 16
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape ,(2, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """google/owlvit-base-patch32"""
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = [["""cat""", """nasa badge"""], ["""person"""]]
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = len(lowerCamelCase )
__SCREAMING_SNAKE_CASE = max([len(lowerCamelCase ) for texts in input_texts] )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape ,(batch_size * num_max_text_queries, seq_length) )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """google/owlvit-base-patch32"""
__SCREAMING_SNAKE_CASE = OwlViTProcessor.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = ["""cat""", """nasa badge"""]
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 16
__SCREAMING_SNAKE_CASE = inputs["""input_ids"""]
__SCREAMING_SNAKE_CASE = [
[4_9406, 2368, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[4_9406, 6841, 1_1301, 4_9407, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
]
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """attention_mask"""] )
self.assertEqual(inputs["""input_ids"""].shape ,(2, seq_length) )
self.assertListEqual(list(input_ids[0] ) ,predicted_ids[0] )
self.assertListEqual(list(input_ids[1] ) ,predicted_ids[1] )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,query_images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) ,["""query_pixel_values""", """pixel_values"""] )
# test if it raises when no input is passed
with pytest.raises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = OwlViTProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
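# The tests above exercise how a combined processor fans text out to the
# tokenizer and images out to the image processor. A toy sketch of that
# dispatch; the class below is hypothetical, not the library's OwlViTProcessor.
class ToyProcessor:
    def __init__(self, tokenizer, image_processor):
        self.tokenizer = tokenizer
        self.image_processor = image_processor

    def __call__(self, text=None, images=None, **kwargs):
        if text is None and images is None:
            raise ValueError("You have to specify either text or images.")
        outputs = {}
        if text is not None:
            outputs.update(self.tokenizer(text, **kwargs))
        if images is not None:
            outputs.update(self.image_processor(images, **kwargs))
        return outputs


processor = ToyProcessor(
    tokenizer=lambda text, **kw: {"input_ids": [[1, 2, 3]] * len(text)},
    image_processor=lambda images, **kw: {"pixel_values": images},
)
out = processor(text=["cat", "nasa badge"], images=[[0.0]])
assert set(out) == {"input_ids", "pixel_values"}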
| 706 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" )
__SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
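# Note that `from bsa import BeautifulSoup` above is an obfuscation of bs4.
# Below is a hedged, runnable sketch of the same scrape with error handling;
# the CSS class is whatever Yahoo's markup used at the time of writing and
# can break without notice.
import requests
from bs4 import BeautifulSoup


def stock_price_checked(symbol: str = "AAPL") -> str:
    url = f"https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"
    response = requests.get(url, timeout=10)
    response.raise_for_status()
    soup = BeautifulSoup(response.text, "html.parser")
    div = soup.find("div", class_="My(6px) Pos(r) smartphone_Mt(6px)")
    if div is None:  # markup changed, or the request was rate-limited/blocked
        raise RuntimeError(f"price element not found for {symbol}")
    return div.find("span").text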
| 13 | 0 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"RWKV/rwkv-4-169m-pile": "https://huggingface.co/RWKV/rwkv-4-169m-pile/resolve/main/config.json",
"RWKV/rwkv-4-430m-pile": "https://huggingface.co/RWKV/rwkv-4-430m-pile/resolve/main/config.json",
"RWKV/rwkv-4-1b5-pile": "https://huggingface.co/RWKV/rwkv-4-1b5-pile/resolve/main/config.json",
"RWKV/rwkv-4-3b-pile": "https://huggingface.co/RWKV/rwkv-4-3b-pile/resolve/main/config.json",
"RWKV/rwkv-4-7b-pile": "https://huggingface.co/RWKV/rwkv-4-7b-pile/resolve/main/config.json",
"RWKV/rwkv-4-14b-pile": "https://huggingface.co/RWKV/rwkv-4-14b-pile/resolve/main/config.json",
"RWKV/rwkv-raven-1b5": "https://huggingface.co/RWKV/rwkv-raven-1b5/resolve/main/config.json",
"RWKV/rwkv-raven-3b": "https://huggingface.co/RWKV/rwkv-raven-3b/resolve/main/config.json",
"RWKV/rwkv-raven-7b": "https://huggingface.co/RWKV/rwkv-raven-7b/resolve/main/config.json",
"RWKV/rwkv-raven-14b": "https://huggingface.co/RWKV/rwkv-raven-14b/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : List[Any] = 'rwkv'
__UpperCamelCase : List[str] = {'max_position_embeddings': 'context_length'}
def __init__( self : Union[str, Any] ,lowerCamelCase : Optional[Any]=5_0277 ,lowerCamelCase : int=1024 ,lowerCamelCase : Any=4096 ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Optional[int]=None ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Optional[int]=1E-5 ,lowerCamelCase : List[Any]=0 ,lowerCamelCase : str=0 ,lowerCamelCase : List[Any]=6 ,lowerCamelCase : List[str]=False ,lowerCamelCase : Dict=True ,**lowerCamelCase : List[Any] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = context_length
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = attention_hidden_size if attention_hidden_size is not None else hidden_size
__SCREAMING_SNAKE_CASE = intermediate_size if intermediate_size is not None else 4 * hidden_size
__SCREAMING_SNAKE_CASE = layer_norm_epsilon
__SCREAMING_SNAKE_CASE = rescale_every
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = bos_token_id
__SCREAMING_SNAKE_CASE = eos_token_id
super().__init__(
tie_word_embeddings=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
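# The constructor above derives two widths when they are not supplied; the
# same defaulting logic in isolation, with readable names:
from typing import Optional, Tuple


def resolve_rwkv_sizes(
    hidden_size: int,
    attention_hidden_size: Optional[int] = None,
    intermediate_size: Optional[int] = None,
) -> Tuple[int, int]:
    # RWKV-style defaults: attention width equals the hidden size,
    # feed-forward width is 4x the hidden size.
    attn = attention_hidden_size if attention_hidden_size is not None else hidden_size
    ffn = intermediate_size if intermediate_size is not None else 4 * hidden_size
    return attn, ffn


assert resolve_rwkv_sizes(1024) == (1024, 4096)
assert resolve_rwkv_sizes(1024, attention_hidden_size=512) == (512, 4096)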
| 707 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> bool:
'''simple docstring'''
if num < 0:
return False
__SCREAMING_SNAKE_CASE = num
__SCREAMING_SNAKE_CASE = 0
while num > 0:
__SCREAMING_SNAKE_CASE = rev_num * 10 + (num % 10)
num //= 10
return num_copy == rev_num
if __name__ == "__main__":
import doctest
doctest.testmod()
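# The same check with readable names: reverse the digits arithmetically and
# compare against the original value.
def is_palindrome_number(num: int) -> bool:
    if num < 0:
        return False
    original, reversed_num = num, 0
    while num > 0:
        reversed_num = reversed_num * 10 + num % 10
        num //= 10
    return original == reversed_num


assert is_palindrome_number(121)
assert not is_palindrome_number(-121)
assert not is_palindrome_number(10)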
| 13 | 0 |
'''simple docstring'''
import inspect
from typing import Callable, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import DiffusionPipeline
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from diffusers.schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler
from diffusers.utils import logging
a = logging.get_logger(__name__) # pylint: disable=invalid-name
class __a ( _snake_case ):
def __init__( self : str ,lowerCamelCase : AutoencoderKL ,lowerCamelCase : CLIPTextModel ,lowerCamelCase : CLIPTokenizer ,lowerCamelCase : UNetaDConditionModel ,lowerCamelCase : Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler] ,lowerCamelCase : StableDiffusionSafetyChecker ,lowerCamelCase : CLIPImageProcessor ,):
'''simple docstring'''
super().__init__()
self.register_modules(
vae=lowerCamelCase ,text_encoder=lowerCamelCase ,tokenizer=lowerCamelCase ,unet=lowerCamelCase ,scheduler=lowerCamelCase ,safety_checker=lowerCamelCase ,feature_extractor=lowerCamelCase ,)
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : Optional[Union[str, int]] = "auto" ):
'''simple docstring'''
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
__SCREAMING_SNAKE_CASE = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCamelCase )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
self.enable_attention_slicing(lowerCamelCase )
@torch.no_grad()
def __call__( self : Optional[Any] ,lowerCamelCase : Union[str, List[str]] ,lowerCamelCase : int = 512 ,lowerCamelCase : int = 512 ,lowerCamelCase : int = 50 ,lowerCamelCase : float = 7.5 ,lowerCamelCase : Optional[Union[str, List[str]]] = None ,lowerCamelCase : Optional[int] = 1 ,lowerCamelCase : float = 0.0 ,lowerCamelCase : Optional[torch.Generator] = None ,lowerCamelCase : Optional[torch.FloatTensor] = None ,lowerCamelCase : Optional[str] = "pil" ,lowerCamelCase : bool = True ,lowerCamelCase : Optional[Callable[[int, int, torch.FloatTensor], None]] = None ,lowerCamelCase : int = 1 ,lowerCamelCase : Optional[torch.FloatTensor] = None ,**lowerCamelCase : Any ,):
'''simple docstring'''
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = 1
elif isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = len(lowerCamelCase )
else:
raise ValueError(f"""`prompt` has to be of type `str` or `list` but is {type(lowerCamelCase )}""" )
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f"""`height` and `width` have to be divisible by 8 but are {height} and {width}.""" )
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(lowerCamelCase ,lowerCamelCase ) or callback_steps <= 0)
):
raise ValueError(
f"""`callback_steps` has to be a positive integer but is {callback_steps} of type"""
f""" {type(lowerCamelCase )}.""" )
# get prompt text embeddings
__SCREAMING_SNAKE_CASE = self.tokenizer(
lowerCamelCase ,padding="""max_length""" ,max_length=self.tokenizer.model_max_length ,return_tensors="""pt""" ,)
__SCREAMING_SNAKE_CASE = text_inputs.input_ids
if text_input_ids.shape[-1] > self.tokenizer.model_max_length:
__SCREAMING_SNAKE_CASE = self.tokenizer.batch_decode(text_input_ids[:, self.tokenizer.model_max_length :] )
logger.warning(
"""The following part of your input was truncated because CLIP can only handle sequences up to"""
f""" {self.tokenizer.model_max_length} tokens: {removed_text}""" )
__SCREAMING_SNAKE_CASE = text_input_ids[:, : self.tokenizer.model_max_length]
if text_embeddings is None:
__SCREAMING_SNAKE_CASE = self.text_encoder(text_input_ids.to(self.device ) )[0]
# duplicate text embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = text_embeddings.shape
__SCREAMING_SNAKE_CASE = text_embeddings.repeat(1 ,lowerCamelCase ,1 )
__SCREAMING_SNAKE_CASE = text_embeddings.view(bs_embed * num_images_per_prompt ,lowerCamelCase ,-1 )
# here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
# of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
# corresponds to doing no classifier free guidance.
__SCREAMING_SNAKE_CASE = guidance_scale > 1.0
# get unconditional embeddings for classifier free guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE = 42
if negative_prompt is None:
__SCREAMING_SNAKE_CASE = [""""""]
elif type(lowerCamelCase ) is not type(lowerCamelCase ):
raise TypeError(
f"""`negative_prompt` should be the same type to `prompt`, but got {type(lowerCamelCase )} !="""
f""" {type(lowerCamelCase )}.""" )
elif isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [negative_prompt]
elif batch_size != len(lowerCamelCase ):
raise ValueError(
f"""`negative_prompt`: {negative_prompt} has batch size {len(lowerCamelCase )}, but `prompt`:"""
f""" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"""
""" the batch size of `prompt`.""" )
else:
__SCREAMING_SNAKE_CASE = negative_prompt
__SCREAMING_SNAKE_CASE = text_input_ids.shape[-1]
__SCREAMING_SNAKE_CASE = self.tokenizer(
lowerCamelCase ,padding="""max_length""" ,max_length=lowerCamelCase ,truncation=lowerCamelCase ,return_tensors="""pt""" ,)
__SCREAMING_SNAKE_CASE = self.text_encoder(uncond_input.input_ids.to(self.device ) )[0]
# duplicate unconditional embeddings for each generation per prompt, using mps friendly method
__SCREAMING_SNAKE_CASE = uncond_embeddings.shape[1]
__SCREAMING_SNAKE_CASE = uncond_embeddings.repeat(lowerCamelCase ,lowerCamelCase ,1 )
__SCREAMING_SNAKE_CASE = uncond_embeddings.view(batch_size * num_images_per_prompt ,lowerCamelCase ,-1 )
# For classifier free guidance, we need to do two forward passes.
# Here we concatenate the unconditional and text embeddings into a single batch
# to avoid doing two forward passes
__SCREAMING_SNAKE_CASE = torch.cat([uncond_embeddings, text_embeddings] )
# get the initial random noise unless the user supplied it
# Unlike in other pipelines, latents need to be generated in the target device
# for 1-to-1 results reproducibility with the CompVis implementation.
# However this currently doesn't work in `mps`.
__SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
__SCREAMING_SNAKE_CASE = (batch_size * num_images_per_prompt, self.unet.config.in_channels, 64, 64)
__SCREAMING_SNAKE_CASE = text_embeddings.dtype
if latents is None:
if self.device.type == "mps":
# randn does not exist on mps
__SCREAMING_SNAKE_CASE = torch.randn(
lowerCamelCase ,generator=lowerCamelCase ,device="""cpu""" ,dtype=lowerCamelCase ).to(self.device )
__SCREAMING_SNAKE_CASE = torch.randn(lowerCamelCase ,generator=lowerCamelCase ,device="""cpu""" ,dtype=lowerCamelCase ).to(
self.device )
else:
__SCREAMING_SNAKE_CASE = torch.randn(
lowerCamelCase ,generator=lowerCamelCase ,device=self.device ,dtype=lowerCamelCase )
__SCREAMING_SNAKE_CASE = torch.randn(lowerCamelCase ,generator=lowerCamelCase ,device=self.device ,dtype=lowerCamelCase )
else:
if latents_reference.shape != latents_shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {latents_shape}""" )
__SCREAMING_SNAKE_CASE = latents_reference.to(self.device )
__SCREAMING_SNAKE_CASE = latents.to(self.device )
# This is the key part of the pipeline where we
# try to ensure that the generated images w/ the same seed
# but different sizes actually result in similar images
__SCREAMING_SNAKE_CASE = (latents_shape[3] - latents_shape_reference[3]) // 2
__SCREAMING_SNAKE_CASE = (latents_shape[2] - latents_shape_reference[2]) // 2
__SCREAMING_SNAKE_CASE = latents_shape_reference[3] if dx >= 0 else latents_shape_reference[3] + 2 * dx
__SCREAMING_SNAKE_CASE = latents_shape_reference[2] if dy >= 0 else latents_shape_reference[2] + 2 * dy
__SCREAMING_SNAKE_CASE = 0 if dx < 0 else dx
__SCREAMING_SNAKE_CASE = 0 if dy < 0 else dy
__SCREAMING_SNAKE_CASE = max(-dx ,0 )
__SCREAMING_SNAKE_CASE = max(-dy ,0 )
# import pdb
# pdb.set_trace()
__SCREAMING_SNAKE_CASE = latents_reference[:, :, dy : dy + h, dx : dx + w]
# set timesteps
self.scheduler.set_timesteps(lowerCamelCase )
# Some schedulers like PNDM have timesteps as arrays
# It's more optimized to move all timesteps to correct device beforehand
__SCREAMING_SNAKE_CASE = self.scheduler.timesteps.to(self.device )
# scale the initial noise by the standard deviation required by the scheduler
__SCREAMING_SNAKE_CASE = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
__SCREAMING_SNAKE_CASE = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
__SCREAMING_SNAKE_CASE = {}
if accepts_eta:
__SCREAMING_SNAKE_CASE = eta
for i, t in enumerate(self.progress_bar(lowerCamelCase ) ):
# expand the latents if we are doing classifier free guidance
__SCREAMING_SNAKE_CASE = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
__SCREAMING_SNAKE_CASE = self.scheduler.scale_model_input(lowerCamelCase ,lowerCamelCase )
# predict the noise residual
__SCREAMING_SNAKE_CASE = self.unet(lowerCamelCase ,lowerCamelCase ,encoder_hidden_states=lowerCamelCase ).sample
# perform guidance
if do_classifier_free_guidance:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = noise_pred.chunk(2 )
__SCREAMING_SNAKE_CASE = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
# compute the previous noisy sample x_t -> x_t-1
__SCREAMING_SNAKE_CASE = self.scheduler.step(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,**lowerCamelCase ).prev_sample
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = 1 / 0.18_215 * latents
__SCREAMING_SNAKE_CASE = self.vae.decode(lowerCamelCase ).sample
__SCREAMING_SNAKE_CASE = (image / 2 + 0.5).clamp(0 ,1 )
# we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
__SCREAMING_SNAKE_CASE = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if self.safety_checker is not None:
__SCREAMING_SNAKE_CASE = self.feature_extractor(self.numpy_to_pil(lowerCamelCase ) ,return_tensors="""pt""" ).to(
self.device )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.safety_checker(
images=lowerCamelCase ,clip_input=safety_checker_input.pixel_values.to(text_embeddings.dtype ) )
else:
__SCREAMING_SNAKE_CASE = None
if output_type == "pil":
__SCREAMING_SNAKE_CASE = self.numpy_to_pil(lowerCamelCase )
if not return_dict:
return (image, has_nsfw_concept)
return StableDiffusionPipelineOutput(images=lowerCamelCase ,nsfw_content_detected=lowerCamelCase )
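# The heart of this community pipeline is pasting center-aligned reference
# latents into the target latents so outputs of different sizes share
# low-frequency structure. The helper below is my own standalone restatement
# of that dx/dy bookkeeping (tensor layout: B, C, H, W).
import torch


def paste_reference_latents(latents: torch.Tensor, reference: torch.Tensor) -> torch.Tensor:
    dx = (latents.shape[3] - reference.shape[3]) // 2
    dy = (latents.shape[2] - reference.shape[2]) // 2
    w = reference.shape[3] if dx >= 0 else reference.shape[3] + 2 * dx
    h = reference.shape[2] if dy >= 0 else reference.shape[2] + 2 * dy
    tx, ty = max(dx, 0), max(dy, 0)    # offsets into the target latents
    sx, sy = max(-dx, 0), max(-dy, 0)  # offsets into the reference latents
    out = latents.clone()
    out[:, :, ty : ty + h, tx : tx + w] = reference[:, :, sy : sy + h, sx : sx + w]
    return out


base = torch.zeros(1, 4, 64, 64)
ref = torch.ones(1, 4, 64, 96)  # wider reference -> its center is cropped in
assert paste_reference_latents(base, ref).sum() == 4 * 64 * 64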
| 708 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
a = list[list[float | int]]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Matrix:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(size + 1 )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for row in range(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = matrix[row][col]
__SCREAMING_SNAKE_CASE = vector[row][0]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
while row < size and col < size:
# pivoting
__SCREAMING_SNAKE_CASE = max((abs(augmented[rowa][col] ), rowa) for rowa in range(__UpperCAmelCase , __UpperCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[rowa][col] / augmented[row][col]
__SCREAMING_SNAKE_CASE = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , __UpperCAmelCase ):
for row in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = augmented[row][col] / augmented[col][col]
for cola in range(__UpperCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(__UpperCAmelCase )
]
def __magic_name__ ( __UpperCAmelCase ) -> Callable[[int], int]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [[0 for _ in range(__UpperCAmelCase )] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = [[0] for _ in range(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for x_val, y_val in enumerate(__UpperCAmelCase ):
for col in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (x_val + 1) ** (size - col - 1)
__SCREAMING_SNAKE_CASE = y_val
__SCREAMING_SNAKE_CASE = solve(__UpperCAmelCase , __UpperCAmelCase )
def interpolated_func(__UpperCAmelCase ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(__UpperCAmelCase ) )
return interpolated_func
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def __magic_name__ ( __UpperCAmelCase = question_function , __UpperCAmelCase = 10 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [func(__UpperCAmelCase ) for x_val in range(1 , order + 1 )]
__SCREAMING_SNAKE_CASE = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 42
__SCREAMING_SNAKE_CASE = 42
for poly in polynomials:
__SCREAMING_SNAKE_CASE = 1
while func(__UpperCAmelCase ) == poly(__UpperCAmelCase ):
x_val += 1
ret += poly(__UpperCAmelCase )
return ret
if __name__ == "__main__":
print(F'''{solution() = }''')
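# A quick cross-check of the interpolation machinery with numpy: fitting a
# quadratic through the first three cubes reproduces Project Euler 101's
# OP(3, n), whose first incorrect term is 58.
import numpy as np

points = [(1, 1), (2, 8), (3, 27)]
size = len(points)
matrix = np.array(
    [[x ** (size - col - 1) for col in range(size)] for x, _ in points], dtype=float
)
vector = np.array([y for _, y in points], dtype=float)
coeffs = np.linalg.solve(matrix, vector)  # same system solve() eliminates by hand


def fitted_poly(n: int) -> int:
    return sum(round(c) * n ** (size - i - 1) for i, c in enumerate(coeffs))


assert [fitted_poly(n) for n in (1, 2, 3)] == [1, 8, 27]
assert fitted_poly(4) == 58  # first incorrect term (the true cube is 64)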
| 13 | 0 |
'''simple docstring'''
from statistics import mean
import numpy as np
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
# Number of processes finished
__SCREAMING_SNAKE_CASE = 0
    # Completion flag per process:
    # 0 means the process has not run yet, 1 means it has finished.
__SCREAMING_SNAKE_CASE = [0] * no_of_process
# List to include calculation results
__SCREAMING_SNAKE_CASE = [0] * no_of_process
# Sort by arrival time.
__SCREAMING_SNAKE_CASE = [burst_time[i] for i in np.argsort(__UpperCAmelCase )]
__SCREAMING_SNAKE_CASE = [process_name[i] for i in np.argsort(__UpperCAmelCase )]
arrival_time.sort()
while no_of_process > finished_process_count:
__SCREAMING_SNAKE_CASE = 0
while finished_process[i] == 1:
i += 1
if current_time < arrival_time[i]:
__SCREAMING_SNAKE_CASE = arrival_time[i]
__SCREAMING_SNAKE_CASE = 0
# Index showing the location of the process being performed
__SCREAMING_SNAKE_CASE = 0
# Saves the current response ratio.
__SCREAMING_SNAKE_CASE = 0
for i in range(0 , __UpperCAmelCase ):
if finished_process[i] == 0 and arrival_time[i] <= current_time:
__SCREAMING_SNAKE_CASE = (burst_time[i] + (current_time - arrival_time[i])) / burst_time[
i
]
if response_ratio < temp:
__SCREAMING_SNAKE_CASE = temp
__SCREAMING_SNAKE_CASE = i
# Calculate the turn around time
__SCREAMING_SNAKE_CASE = current_time + burst_time[loc] - arrival_time[loc]
current_time += burst_time[loc]
# Indicates that the process has been performed.
__SCREAMING_SNAKE_CASE = 1
# Increase finished_process_count by 1
finished_process_count += 1
return turn_around_time
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> list:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [0] * no_of_process
for i in range(0 , __UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = turn_around_time[i] - burst_time[i]
return waiting_time
if __name__ == "__main__":
a = 5
a = ["A", "B", "C", "D", "E"]
a = [1, 2, 3, 4, 5]
a = [1, 2, 3, 4, 5]
a = calculate_turn_around_time(
process_name, arrival_time, burst_time, no_of_process
)
a = calculate_waiting_time(
process_name, turn_around_time, burst_time, no_of_process
)
print("Process name \tArrival time \tBurst time \tTurn around time \tWaiting time")
for i in range(0, no_of_process):
print(
F'''{process_name[i]}\t\t{arrival_time[i]}\t\t{burst_time[i]}\t\t'''
F'''{turn_around_time[i]}\t\t\t{waiting_time[i]}'''
)
print(F'''average waiting time : {mean(waiting_time):.5f}''')
print(F'''average turn around time : {mean(turn_around_time):.5f}''')
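# The scheduling decision above boils down to a single formula,
# response ratio = (waiting time + burst time) / burst time; HRRN always
# runs the ready process with the highest ratio next.
def response_ratio(arrival: int, burst: int, current_time: int) -> float:
    waiting = current_time - arrival
    return (waiting + burst) / burst


# A job that has waited exactly as long as it needs to run has ratio 2.0.
assert response_ratio(arrival=0, burst=5, current_time=5) == 2.0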
| 709 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
a = logging.get_logger(__name__)
@add_end_docstrings(_snake_case )
class __a ( _snake_case ):
def __init__( self : Union[str, Any] ,**lowerCamelCase : str ):
'''simple docstring'''
super().__init__(**lowerCamelCase )
requires_backends(self ,"""vision""" )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == """tf"""
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Dict ,lowerCamelCase : Union[str, List[str], "Image", List["Image"]] ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
return super().__call__(lowerCamelCase ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[Any] ,**lowerCamelCase : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if "candidate_labels" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""candidate_labels"""]
if "hypothesis_template" in kwargs:
__SCREAMING_SNAKE_CASE = kwargs["""hypothesis_template"""]
return preprocess_params, {}, {}
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Union[str, Any]="This is a photo of {}." ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_image(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.image_processor(images=[image] ,return_tensors=self.framework )
__SCREAMING_SNAKE_CASE = candidate_labels
__SCREAMING_SNAKE_CASE = [hypothesis_template.format(lowerCamelCase ) for x in candidate_labels]
__SCREAMING_SNAKE_CASE = self.tokenizer(lowerCamelCase ,return_tensors=self.framework ,padding=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [text_inputs]
return inputs
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_inputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_inputs.pop("""text_inputs""" )
if isinstance(text_inputs[0] ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = text_inputs[0]
else:
# Batching case.
__SCREAMING_SNAKE_CASE = text_inputs[0][0]
__SCREAMING_SNAKE_CASE = self.model(**lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = {
"""candidate_labels""": candidate_labels,
"""logits""": outputs.logits_per_image,
}
return model_outputs
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_outputs.pop("""candidate_labels""" )
__SCREAMING_SNAKE_CASE = model_outputs["""logits"""][0]
if self.framework == "pt":
__SCREAMING_SNAKE_CASE = logits.softmax(dim=-1 ).squeeze(-1 )
__SCREAMING_SNAKE_CASE = probs.tolist()
if not isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = [scores]
elif self.framework == "tf":
__SCREAMING_SNAKE_CASE = stable_softmax(lowerCamelCase ,axis=-1 )
__SCREAMING_SNAKE_CASE = probs.numpy().tolist()
else:
raise ValueError(f"""Unsupported framework: {self.framework}""" )
__SCREAMING_SNAKE_CASE = [
{"""score""": score, """label""": candidate_label}
            for score, candidate_label in sorted(zip(lowerCamelCase ,lowerCamelCase ) ,key=lambda lowerCamelCase : -lowerCamelCase[0] )
]
return result
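# Typical end-user invocation of the pipeline class above. Running this
# downloads the referenced CLIP checkpoint; the image URL is the usual COCO
# sample used in the transformers docs.
from transformers import pipeline

classifier = pipeline(
    "zero-shot-image-classification", model="openai/clip-vit-base-patch32"
)
result = classifier(
    "http://images.cocodataset.org/val2017/000000039769.jpg",
    candidate_labels=["two cats", "a dog", "an airplane"],
    hypothesis_template="This is a photo of {}.",
)
print(result[0]["label"])  # scores are sorted descending, so [0] is the best match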
| 13 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {
"configuration_clipseg": [
"CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
"CLIPSegConfig",
"CLIPSegTextConfig",
"CLIPSegVisionConfig",
],
"processing_clipseg": ["CLIPSegProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
"CLIPSegModel",
"CLIPSegPreTrainedModel",
"CLIPSegTextModel",
"CLIPSegVisionModel",
"CLIPSegForImageSegmentation",
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
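# The TYPE_CHECKING branch above exists so static type checkers see real
# imports while runtime imports stay lazy. The idiom in miniature:
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from decimal import Decimal  # evaluated by mypy/pyright, never at runtime


def double(value: "Decimal") -> "Decimal":  # string annotations avoid the import
    from decimal import Decimal  # deferred until the first call
    return value * Decimal(2)


from decimal import Decimal as _D  # eager import here only to test the sketch
assert double(_D("1.5")) == _D("3.0")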
| 710 |
'''simple docstring'''
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
a = [
"python",
"tqdm",
"regex",
"requests",
"packaging",
"filelock",
"numpy",
"tokenizers",
"huggingface-hub",
"safetensors",
"accelerate",
"pyyaml",
]
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
elif pkg == "accelerate":
# must be loaded here, or else tqdm check may fail
from .utils import is_accelerate_available
# Maybe switch to is_torch_available in the future here so that Accelerate is hard dep of
# Transformers with PyTorch
if not is_accelerate_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(F'''can\'t find {pkg} in {deps.keys()}, check dependency_versions_table.py''')
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
require_version(deps[pkg] , __UpperCAmelCase )
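# A toy sketch of what a require_version-style check does under the hood.
# The real helper lives in transformers.utils.versions and handles full
# specifier sets; this version covers only ">=" requirements.
import importlib.metadata
from packaging import version


def require_min_version(requirement: str) -> None:
    name, wanted = requirement.split(">=")
    installed = importlib.metadata.version(name)
    if version.parse(installed) < version.parse(wanted):
        raise ImportError(f"{name}>={wanted} is required, found {installed}")


require_min_version("packaging>=20.0")  # passes on any reasonably recent env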
| 13 | 0 |
'''simple docstring'''
import collections
import inspect
import unittest
from typing import Dict, List, Tuple
from transformers import MaskFormerSwinConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, torch_device
from transformers.utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MaskFormerSwinBackbone
from transformers.models.maskformer import MaskFormerSwinModel
class __a :
def __init__( self : Optional[int] ,lowerCamelCase : Dict ,lowerCamelCase : Optional[int]=13 ,lowerCamelCase : Union[str, Any]=32 ,lowerCamelCase : str=2 ,lowerCamelCase : str=3 ,lowerCamelCase : List[str]=16 ,lowerCamelCase : Tuple=[1, 2, 1] ,lowerCamelCase : List[Any]=[2, 2, 4] ,lowerCamelCase : Tuple=2 ,lowerCamelCase : int=2.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Union[str, Any]=0.0 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : List[str]="gelu" ,lowerCamelCase : Dict=False ,lowerCamelCase : str=True ,lowerCamelCase : Any=0.02 ,lowerCamelCase : Tuple=1E-5 ,lowerCamelCase : str=True ,lowerCamelCase : Any=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : List[Any]=10 ,lowerCamelCase : Union[str, Any]=8 ,lowerCamelCase : str=["stage1", "stage2", "stage3"] ,lowerCamelCase : int=[1, 2, 3] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = embed_dim
__SCREAMING_SNAKE_CASE = depths
__SCREAMING_SNAKE_CASE = num_heads
__SCREAMING_SNAKE_CASE = window_size
__SCREAMING_SNAKE_CASE = mlp_ratio
__SCREAMING_SNAKE_CASE = qkv_bias
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = drop_path_rate
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = use_absolute_embeddings
__SCREAMING_SNAKE_CASE = patch_norm
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = scope
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = encoder_stride
__SCREAMING_SNAKE_CASE = out_features
__SCREAMING_SNAKE_CASE = out_indices
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = None
if self.use_labels:
__SCREAMING_SNAKE_CASE = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
__SCREAMING_SNAKE_CASE = self.get_config()
return config, pixel_values, labels
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
return MaskFormerSwinConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,embed_dim=self.embed_dim ,depths=self.depths ,num_heads=self.num_heads ,window_size=self.window_size ,mlp_ratio=self.mlp_ratio ,qkv_bias=self.qkv_bias ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,drop_path_rate=self.drop_path_rate ,hidden_act=self.hidden_act ,use_absolute_embeddings=self.use_absolute_embeddings ,path_norm=self.patch_norm ,layer_norm_eps=self.layer_norm_eps ,initializer_range=self.initializer_range ,encoder_stride=self.encoder_stride ,out_features=self.out_features ,out_indices=self.out_indices ,)
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskFormerSwinModel(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
__SCREAMING_SNAKE_CASE = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
__SCREAMING_SNAKE_CASE = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, expected_seq_len, expected_dim) )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskFormerSwinBackbone(config=lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[13, 16, 16, 16] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,[16, 32, 64] )
# verify ValueError
with self.parent.assertRaises(lowerCamelCase ):
__SCREAMING_SNAKE_CASE = ["""stem"""]
__SCREAMING_SNAKE_CASE = MaskFormerSwinBackbone(config=lowerCamelCase )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __a ( _snake_case, _snake_case, unittest.TestCase ):
__UpperCamelCase : Dict = (
(
MaskFormerSwinModel,
MaskFormerSwinBackbone,
)
if is_torch_available()
else ()
)
__UpperCamelCase : Optional[int] = {'feature-extraction': MaskFormerSwinModel} if is_torch_available() else {}
__UpperCamelCase : Dict = False
__UpperCamelCase : str = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Optional[Any] = False
__UpperCamelCase : Tuple = False
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskFormerSwinModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,embed_dim=37 )
@require_torch_multi_gpu
@unittest.skip(
reason=(
"""`MaskFormerSwinModel` outputs `hidden_states_spatial_dimensions` which doesn't work well with"""
""" `nn.DataParallel`"""
) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*lowerCamelCase )
@unittest.skip("""Swin does not use inputs_embeds""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip("""Swin does not support feedforward chunking""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
__SCREAMING_SNAKE_CASE = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCamelCase ,nn.Linear ) )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
@unittest.skip(reason="""MaskFormerSwin is only used as backbone and doesn't support output_attentions""" )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
pass
@unittest.skip(reason="""MaskFormerSwin is only used as an internal backbone""" )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Dict ,lowerCamelCase : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : List[Any] ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**self._prepare_for_class(lowerCamelCase ,lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = outputs.hidden_states
__SCREAMING_SNAKE_CASE = getattr(
self.model_tester ,"""expected_num_hidden_layers""" ,len(self.model_tester.depths ) + 1 )
self.assertEqual(len(lowerCamelCase ) ,lowerCamelCase )
# Swin has a different seq_length
__SCREAMING_SNAKE_CASE = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[num_patches, self.model_tester.embed_dim] ,)
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = 3
__SCREAMING_SNAKE_CASE = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size ,collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
__SCREAMING_SNAKE_CASE = (
config.patch_size
if isinstance(config.patch_size ,collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
__SCREAMING_SNAKE_CASE = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
__SCREAMING_SNAKE_CASE = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,(padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
__SCREAMING_SNAKE_CASE = True
self.check_hidden_states_output(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,(padded_height, padded_width) )
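# --- Illustrative aside (ours, not part of the original test) ---
# The padding rule used above, in isolation. Note that as written it always
# adds a full extra patch when the size is already divisible:
def _pad_like_above(size: int, patch: int) -> int:
    return size + patch - (size % patch)
assert _pad_like_above(30, 4) == 32  # rounds 30 up to the next multiple of 4
assert _pad_like_above(32, 4) == 36  # already divisible, still padded by 4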
@unittest.skip(reason="""MaskFormerSwin doesn't have pretrained checkpoints""" )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="""This will be fixed once MaskFormerSwin is replaced by native Swin""" )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
def set_nan_tensor_to_zero(lowerCamelCase : Union[str, Any] ):
__SCREAMING_SNAKE_CASE = 0
return t
def check_equivalence(lowerCamelCase : Any ,lowerCamelCase : int ,lowerCamelCase : int ,lowerCamelCase : str={} ):
with torch.no_grad():
__SCREAMING_SNAKE_CASE = model(**lowerCamelCase ,return_dict=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(**lowerCamelCase ,return_dict=lowerCamelCase ,**lowerCamelCase ).to_tuple()
def recursive_check(lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any] ):
if isinstance(lowerCamelCase ,(List, Tuple) ):
for tuple_iterable_value, dict_iterable_value in zip(lowerCamelCase ,lowerCamelCase ):
recursive_check(lowerCamelCase ,lowerCamelCase )
elif isinstance(lowerCamelCase ,lowerCamelCase ):
for tuple_iterable_value, dict_iterable_value in zip(
tuple_object.values() ,dict_object.values() ):
recursive_check(lowerCamelCase ,lowerCamelCase )
elif tuple_object is None:
return
else:
self.assertTrue(
torch.allclose(
set_nan_tensor_to_zero(lowerCamelCase ) ,set_nan_tensor_to_zero(lowerCamelCase ) ,atol=1E-5 ) ,msg=(
"""Tuple and dict output are not equal. Difference:"""
f""" {torch.max(torch.abs(tuple_object - dict_object ) )}. Tuple has `nan`:"""
f""" {torch.isnan(lowerCamelCase ).any()} and `inf`: {torch.isinf(lowerCamelCase )}. Dict has"""
f""" `nan`: {torch.isnan(lowerCamelCase ).any()} and `inf`: {torch.isinf(lowerCamelCase )}."""
) ,)
recursive_check(lowerCamelCase ,lowerCamelCase )
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
model.to(lowerCamelCase )
model.eval()
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
check_equivalence(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
check_equivalence(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,{"""output_hidden_states""": True} )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase ,return_labels=lowerCamelCase )
check_equivalence(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,{"""output_hidden_states""": True} )
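# Note (ours): each pairing above compares the tuple output of
# `return_dict=False` with `.to_tuple()` of the `return_dict=True` output on
# identical inputs, so any mismatch points at output-class plumbing rather
# than the model math.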
@require_torch
class __a ( unittest.TestCase, _snake_case ):
__UpperCamelCase : Optional[Any] = (MaskFormerSwinBackbone,) if is_torch_available() else ()
__UpperCamelCase : Any = MaskFormerSwinConfig
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = MaskFormerSwinModelTester(self )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
__SCREAMING_SNAKE_CASE = inputs_dict["""pixel_values"""].shape[0]
for backbone_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = backbone_class(lowerCamelCase )
backbone.to(lowerCamelCase )
backbone.eval()
__SCREAMING_SNAKE_CASE = backbone(**lowerCamelCase )
# Test default outputs and verify feature maps
self.assertIsInstance(outputs.feature_maps ,lowerCamelCase )
self.assertTrue(len(outputs.feature_maps ) == len(backbone.channels ) )
for feature_map, n_channels in zip(outputs.feature_maps ,backbone.channels ):
self.assertEqual(feature_map.shape[:2] ,(batch_size, n_channels) )
self.assertIsNone(outputs.hidden_states )
self.assertIsNone(outputs.attentions )
# Test output_hidden_states=True
__SCREAMING_SNAKE_CASE = backbone(**lowerCamelCase ,output_hidden_states=lowerCamelCase )
self.assertIsNotNone(outputs.hidden_states )
self.assertEqual(len(outputs.hidden_states ) ,len(backbone.stage_names ) )
# We skip the stem layer
for hidden_states, n_channels in zip(outputs.hidden_states[1:] ,backbone.channels ):
for hidden_state in hidden_states:
# Hidden states are in the format (batch_size, (height * width), n_channels)
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_state.shape
self.assertEqual((h_batch_size, h_n_channels) ,(batch_size, n_channels) )
# Test output_attentions=True
if self.has_attentions:
__SCREAMING_SNAKE_CASE = backbone(**lowerCamelCase ,output_attentions=lowerCamelCase )
self.assertIsNotNone(outputs.attentions )
| 711 |
'''simple docstring'''
import logging
import os
import random
import sys
from dataclasses import dataclass, field
from typing import Optional
import datasets
import numpy as np
import pandas as pd
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
BartForSequenceClassification,
DataCollatorWithPadding,
EvalPrediction,
HfArgumentParser,
TapexTokenizer,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version
from transformers.utils.versions import require_version
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.17.0.dev0")
require_version("datasets>=1.8.0", "To fix: pip install -r examples/pytorch/text-classification/requirements.txt")
a = logging.getLogger(__name__)
@dataclass
class __a :
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
__UpperCamelCase : Optional[str] = field(
default='tab_fact', metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'}, )
__UpperCamelCase : int = field(
default=1024, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached preprocessed datasets or not.'} )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[int] = field(
default=_snake_case, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of prediction examples to this '
'value if set.'
)
}, )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the training data.'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'A csv or a json file containing the validation data.'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'A csv or a json file containing the test data.'} )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
if self.dataset_name is not None:
pass
elif self.train_file is None or self.validation_file is None:
raise ValueError("""Need either a GLUE task, a training/validation file or a dataset name.""" )
else:
__SCREAMING_SNAKE_CASE = self.train_file.split(""".""" )[-1]
assert train_extension in ["csv", "json"], "`train_file` should be a csv or a json file."
__SCREAMING_SNAKE_CASE = self.validation_file.split(""".""" )[-1]
assert (
validation_extension == train_extension
), "`validation_file` should have the same extension (csv or json) as `train_file`."
@dataclass
class __a :
__UpperCamelCase : str = field(
default=_snake_case, metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
__UpperCamelCase : str = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
def __magic_name__ ( ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
__SCREAMING_SNAKE_CASE = training_args.get_process_log_level()
logger.setLevel(__UpperCAmelCase )
datasets.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.set_verbosity(__UpperCAmelCase )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ f"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Detecting last checkpoint.
__SCREAMING_SNAKE_CASE = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
__SCREAMING_SNAKE_CASE = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON training and evaluation files (see below)
# or specify a GLUE benchmark task (the dataset will be downloaded automatically from the datasets Hub).
#
# For JSON files, this script will use the `question` column for the input question and `table` column for the corresponding table.
#
# If the CSVs/JSONs contain only one non-label column, the script does single sentence classification on this
# single column. You can easily tweak this behavior (see below)
#
# In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__SCREAMING_SNAKE_CASE = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from your local files.
# CSV/JSON training and evaluation files are needed.
__SCREAMING_SNAKE_CASE = {"""train""": data_args.train_file, """validation""": data_args.validation_file}
# Get the test dataset: you can provide your own CSV/JSON test file (see below)
# when you use `do_predict` without specifying a GLUE benchmark task.
if training_args.do_predict:
if data_args.test_file is not None:
__SCREAMING_SNAKE_CASE = data_args.train_file.split(""".""" )[-1]
__SCREAMING_SNAKE_CASE = data_args.test_file.split(""".""" )[-1]
assert (
test_extension == train_extension
), "`test_file` should have the same extension (csv or json) as `train_file`."
__SCREAMING_SNAKE_CASE = data_args.test_file
else:
raise ValueError("""Need either a GLUE task or a test file for `do_predict`.""" )
for key in data_files.keys():
logger.info(f"""load a local file for {key}: {data_files[key]}""" )
if data_args.train_file.endswith(""".csv""" ):
# Loading a dataset from local csv files
__SCREAMING_SNAKE_CASE = load_dataset("""csv""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
else:
# Loading a dataset from local json files
__SCREAMING_SNAKE_CASE = load_dataset("""json""" , data_files=__UpperCAmelCase , cache_dir=model_args.cache_dir )
# See more about loading any type of standard or custom dataset at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Labels
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""].features["""label"""].names
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
# Load pretrained model and tokenizer
#
# In distributed training, the .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# load tapex tokenizer
__SCREAMING_SNAKE_CASE = TapexTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , add_prefix_space=__UpperCAmelCase , )
__SCREAMING_SNAKE_CASE = BartForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# Padding strategy
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = """max_length"""
else:
# We will pad later, dynamically at batch creation, to the max sequence length in each batch
__SCREAMING_SNAKE_CASE = False
# Some models have set the order of the labels to use, so let's make sure we do use it.
__SCREAMING_SNAKE_CASE = {"""Refused""": 0, """Entailed""": 1}
__SCREAMING_SNAKE_CASE = {0: """Refused""", 1: """Entailed"""}
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.""" )
__SCREAMING_SNAKE_CASE = min(data_args.max_seq_length , tokenizer.model_max_length )
def preprocess_tabfact_function(__UpperCAmelCase ):
# Tokenize the texts
def _convert_table_text_to_pandas(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = [_table_row.split("""#""" ) for _table_row in _table_text.strip("""\n""" ).split("""\n""" )]
__SCREAMING_SNAKE_CASE = pd.DataFrame.from_records(_table_content[1:] , columns=_table_content[0] )
return _table_pd
__SCREAMING_SNAKE_CASE = examples["""statement"""]
__SCREAMING_SNAKE_CASE = list(map(_convert_table_text_to_pandas , examples["""table_text"""] ) )
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , __UpperCAmelCase , padding=__UpperCAmelCase , max_length=__UpperCAmelCase , truncation=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = examples["""label"""]
return result
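# Illustrative helper (ours, defined but never called): shows the
# '#'-delimited table format that `_convert_table_text_to_pandas` expects,
# with the first row serving as the header.
def _table_text_demo():
    demo_text = "year#city\n2008#Beijing\n2012#London"
    demo_rows = [row.split("#") for row in demo_text.strip("\n").split("\n")]
    return pd.DataFrame.from_records(demo_rows[1:], columns=demo_rows[0])  # columns: ['year', 'city']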
with training_args.main_process_first(desc="""dataset map pre-processing""" ):
__SCREAMING_SNAKE_CASE = raw_datasets.map(
__UpperCAmelCase , batched=__UpperCAmelCase , load_from_cache_file=not data_args.overwrite_cache , desc="""Running tokenizer on dataset""" , )
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("""--do_train requires a train dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""train"""]
if data_args.max_train_samples is not None:
__SCREAMING_SNAKE_CASE = train_dataset.select(range(data_args.max_train_samples ) )
if training_args.do_eval:
if "validation" not in raw_datasets and "validation_matched" not in raw_datasets:
raise ValueError("""--do_eval requires a validation dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""validation"""]
if data_args.max_eval_samples is not None:
__SCREAMING_SNAKE_CASE = eval_dataset.select(range(data_args.max_eval_samples ) )
if training_args.do_predict or data_args.test_file is not None:
if "test" not in raw_datasets and "test_matched" not in raw_datasets:
raise ValueError("""--do_predict requires a test dataset""" )
__SCREAMING_SNAKE_CASE = raw_datasets["""test"""]
if data_args.max_predict_samples is not None:
__SCREAMING_SNAKE_CASE = predict_dataset.select(range(data_args.max_predict_samples ) )
# Log a few random samples from the training set:
if training_args.do_train:
for index in random.sample(range(len(__UpperCAmelCase ) ) , 3 ):
logger.info(f"""Sample {index} of the training set: {train_dataset[index]}.""" )
# You can define your custom compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with a
# predictions and label_ids field) and has to return a dictionary string to float.
def compute_metrics(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = p.predictions[0] if isinstance(p.predictions , __UpperCAmelCase ) else p.predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
return {"accuracy": (preds == p.label_ids).astype(np.floataa ).mean().item()}
# Data collator will default to DataCollatorWithPadding, so we change it if we already did the padding.
if data_args.pad_to_max_length:
__SCREAMING_SNAKE_CASE = default_data_collator
elif training_args.fpaa:
__SCREAMING_SNAKE_CASE = DataCollatorWithPadding(__UpperCAmelCase , pad_to_multiple_of=8 )
else:
__SCREAMING_SNAKE_CASE = None
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = Trainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , compute_metrics=__UpperCAmelCase , tokenizer=__UpperCAmelCase , data_collator=__UpperCAmelCase , )
# Training
if training_args.do_train:
__SCREAMING_SNAKE_CASE = None
if training_args.resume_from_checkpoint is not None:
__SCREAMING_SNAKE_CASE = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
__SCREAMING_SNAKE_CASE = last_checkpoint
__SCREAMING_SNAKE_CASE = trainer.train(resume_from_checkpoint=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_result.metrics
__SCREAMING_SNAKE_CASE = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__UpperCAmelCase )
)
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.save_model() # Saves the tokenizer too for easy upload
trainer.log_metrics("""train""" , __UpperCAmelCase )
trainer.save_metrics("""train""" , __UpperCAmelCase )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate(eval_dataset=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = min(__UpperCAmelCase , len(__UpperCAmelCase ) )
trainer.log_metrics("""eval""" , __UpperCAmelCase )
trainer.save_metrics("""eval""" , __UpperCAmelCase )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
# Removing the `label` column because it contains -1 and the Trainer won't like that.
__SCREAMING_SNAKE_CASE = predict_dataset.remove_columns("""label""" )
__SCREAMING_SNAKE_CASE = trainer.predict(__UpperCAmelCase , metric_key_prefix="""predict""" ).predictions
__SCREAMING_SNAKE_CASE = np.argmax(__UpperCAmelCase , axis=1 )
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """predict_results_tabfact.txt""" )
if trainer.is_world_process_zero():
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Predict Results *****""" )
writer.write("""index\tprediction\n""" )
for index, item in enumerate(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = label_list[item]
writer.write(f"""{index}\t{item}\n""" )
__SCREAMING_SNAKE_CASE = {"""finetuned_from""": model_args.model_name_or_path, """tasks""": """text-classification"""}
if training_args.push_to_hub:
trainer.push_to_hub(**__UpperCAmelCase )
else:
trainer.create_model_card(**__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
main()
if __name__ == "__main__":
main()
| 13 | 0 |
'''simple docstring'''
speed_chart = {
"km/h": 1.0,
"m/s": 3.6,
"mph": 1.60_9344,
"knot": 1.852,
}
speed_chart_inverse = {
"km/h": 1.0,
"m/s": 0.2_7777_7778,
"mph": 0.6_2137_1192,
"knot": 0.5_3995_6803,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> float:
'''simple docstring'''
if unit_to not in speed_chart or unit_from not in speed_chart_inverse:
__SCREAMING_SNAKE_CASE = (
f"""Incorrect 'from_type' or 'to_type' value: {unit_from!r}, {unit_to!r}\n"""
f"""Valid values are: {', '.join(__UpperCAmelCase )}"""
)
raise ValueError(__UpperCAmelCase )
return round(speed * speed_chart[unit_from] * speed_chart_inverse[unit_to] , 3 )
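# Quick self-check (ours): 100 km/h in m/s via the chart arithmetic above.
assert round(100 * speed_chart["km/h"] * speed_chart_inverse["m/s"], 3) == 27.778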
if __name__ == "__main__":
import doctest
doctest.testmod()
| 712 |
'''simple docstring'''
from ...utils import (
OptionalDependencyNotAvailable,
is_flax_available,
is_torch_available,
is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .multicontrolnet import MultiControlNetModel
from .pipeline_controlnet import StableDiffusionControlNetPipeline
from .pipeline_controlnet_imgaimg import StableDiffusionControlNetImgaImgPipeline
from .pipeline_controlnet_inpaint import StableDiffusionControlNetInpaintPipeline
if is_transformers_available() and is_flax_available():
from .pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline
| 13 | 0 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = (
first_str_length if first_str_length > second_str_length else second_str_length
)
__SCREAMING_SNAKE_CASE = []
for char_count in range(__UpperCAmelCase ):
if char_count < first_str_length:
output_list.append(first_str[char_count] )
if char_count < second_str_length:
output_list.append(second_str[char_count] )
return "".join(__UpperCAmelCase )
if __name__ == "__main__":
print(alternative_string_arrange("AB", "XYZ"), end=" ")
| 713 |
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase , params=__UpperCAmelCase ).content , """html.parser""" )
__SCREAMING_SNAKE_CASE = soup.find("""div""" , attrs={"""class""": """gs_ri"""} )
__SCREAMING_SNAKE_CASE = div.find("""div""" , attrs={"""class""": """gs_fl"""} ).find_all("""a""" )
return anchors[2].get_text()
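# Note (ours): inside the `gs_fl` footer of a Scholar result, the third
# anchor is typically the "Cited by N" link, so this returns that label.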
if __name__ == "__main__":
a = {
"title": (
"Precisely geometry controlled microsupercapacitors for ultrahigh areal "
"capacitance, volumetric capacitance, and energy density"
),
"journal": "Chem. Mater.",
"volume": 30,
"pages": "3979-3990",
"year": 2018,
"hl": "en",
}
print(get_citation("https://scholar.google.com/scholar_lookup", params=params))
| 13 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class __a ( unittest.TestCase ):
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = tempfile.mkdtemp()
# fmt: off
__SCREAMING_SNAKE_CASE = ["""[UNK]""", """[CLS]""", """[SEP]""", """[PAD]""", """[MASK]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing""", """,""", """low""", """lowest"""]
# fmt: on
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file ,"""w""" ,encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
__SCREAMING_SNAKE_CASE = {
"""do_resize""": True,
"""size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.5, 0.5, 0.5],
"""image_std""": [0.5, 0.5, 0.5],
}
__SCREAMING_SNAKE_CASE = os.path.join(self.tmpdirname ,lowerCamelCase )
with open(self.image_processor_file ,"""w""" ,encoding="""utf-8""" ) as fp:
json.dump(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : str ,**lowerCamelCase : Any ):
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname ,**lowerCamelCase )
def UpperCAmelCase__ ( self : Tuple ,**lowerCamelCase : List[Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
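# Note (ours): `np.moveaxis(x, 0, -1)` turns the (C, H, W) arrays into the
# (H, W, C) layout that `Image.fromarray` expects.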
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
__SCREAMING_SNAKE_CASE = self.get_tokenizer(bos_token="""(BOS)""" ,eos_token="""(EOS)""" )
__SCREAMING_SNAKE_CASE = self.get_image_processor(do_normalize=lowerCamelCase ,padding_value=1.0 )
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname ,bos_token="""(BOS)""" ,eos_token="""(EOS)""" ,do_normalize=lowerCamelCase ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,(BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = image_processor(lowerCamelCase ,return_tensors="""np""" )
__SCREAMING_SNAKE_CASE = processor(images=lowerCamelCase ,return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1E-2 )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer(lowerCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) ,["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
with self.assertRaises(lowerCamelCase ):
processor()
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
__SCREAMING_SNAKE_CASE = processor.batch_decode(lowerCamelCase )
__SCREAMING_SNAKE_CASE = tokenizer.batch_decode(lowerCamelCase )
self.assertListEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.get_image_processor()
__SCREAMING_SNAKE_CASE = self.get_tokenizer()
__SCREAMING_SNAKE_CASE = VisionTextDualEncoderProcessor(tokenizer=lowerCamelCase ,image_processor=lowerCamelCase )
__SCREAMING_SNAKE_CASE = """lower newer"""
__SCREAMING_SNAKE_CASE = self.prepare_image_inputs()
__SCREAMING_SNAKE_CASE = processor(text=lowerCamelCase ,images=lowerCamelCase )
self.assertListEqual(list(inputs.keys() ) ,processor.model_input_names )
| 714 |
'''simple docstring'''
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"camembert-base": "https://huggingface.co/camembert-base/resolve/main/config.json",
"umberto-commoncrawl-cased-v1": (
"https://huggingface.co/Musixmatch/umberto-commoncrawl-cased-v1/resolve/main/config.json"
),
"umberto-wikipedia-uncased-v1": (
"https://huggingface.co/Musixmatch/umberto-wikipedia-uncased-v1/resolve/main/config.json"
),
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'camembert'
def __init__( self : int ,lowerCamelCase : List[Any]=3_0522 ,lowerCamelCase : List[Any]=768 ,lowerCamelCase : str=12 ,lowerCamelCase : List[str]=12 ,lowerCamelCase : Optional[Any]=3072 ,lowerCamelCase : Tuple="gelu" ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=512 ,lowerCamelCase : Dict=2 ,lowerCamelCase : Tuple=0.02 ,lowerCamelCase : List[Any]=1E-1_2 ,lowerCamelCase : Union[str, Any]=1 ,lowerCamelCase : Optional[Any]=0 ,lowerCamelCase : List[Any]=2 ,lowerCamelCase : List[str]="absolute" ,lowerCamelCase : int=True ,lowerCamelCase : Any=None ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = vocab_size
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = type_vocab_size
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = position_embedding_type
__SCREAMING_SNAKE_CASE = use_cache
__SCREAMING_SNAKE_CASE = classifier_dropout
class __a ( _snake_case ):
@property
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
if self.task == "multiple-choice":
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
__SCREAMING_SNAKE_CASE = {0: """batch""", 1: """sequence"""}
return OrderedDict(
[
("""input_ids""", dynamic_axis),
("""attention_mask""", dynamic_axis),
] )
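# Illustrative aside (ours): for any task other than multiple-choice, the
# `inputs` property above resolves to this ordered mapping of input names to
# their dynamic ONNX axes.
_EXPECTED_ONNX_INPUTS = OrderedDict(
    [
        ("input_ids", {0: "batch", 1: "sequence"}),
        ("attention_mask", {0: "batch", 1: "sequence"}),
    ]
)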
| 13 | 0 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from transformers import TvltFeatureExtractor, is_datasets_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_torch_available():
import torch
if is_datasets_available():
from datasets import load_dataset
a = random.Random()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=1.0 , __UpperCAmelCase=None , __UpperCAmelCase=None ) -> Optional[int]:
'''simple docstring'''
if rng is None:
__SCREAMING_SNAKE_CASE = global_rng
__SCREAMING_SNAKE_CASE = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
class __a ( unittest.TestCase ):
def __init__( self : List[Any] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Optional[Any]=7 ,lowerCamelCase : Union[str, Any]=400 ,lowerCamelCase : Any=2000 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : Any=128 ,lowerCamelCase : List[Any]=1 ,lowerCamelCase : str=512 ,lowerCamelCase : Any=30 ,lowerCamelCase : List[str]=4_4100 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = min_seq_length
__SCREAMING_SNAKE_CASE = max_seq_length
__SCREAMING_SNAKE_CASE = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
__SCREAMING_SNAKE_CASE = spectrogram_length
__SCREAMING_SNAKE_CASE = feature_size
__SCREAMING_SNAKE_CASE = num_audio_channels
__SCREAMING_SNAKE_CASE = hop_length
__SCREAMING_SNAKE_CASE = chunk_length
__SCREAMING_SNAKE_CASE = sampling_rate
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return {
"spectrogram_length": self.spectrogram_length,
"feature_size": self.feature_size,
"num_audio_channels": self.num_audio_channels,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"sampling_rate": self.sampling_rate,
}
def UpperCAmelCase__ ( self : str ,lowerCamelCase : str=False ,lowerCamelCase : Any=False ):
'''simple docstring'''
def _flatten(lowerCamelCase : Optional[int] ):
return list(itertools.chain(*lowerCamelCase ) )
if equal_length:
__SCREAMING_SNAKE_CASE = [floats_list((self.max_seq_length, self.feature_size) ) for _ in range(self.batch_size )]
else:
# make sure that inputs increase in size
__SCREAMING_SNAKE_CASE = [
floats_list((x, self.feature_size) )
for x in range(self.min_seq_length ,self.max_seq_length ,self.seq_length_diff )
]
if numpify:
__SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase ) for x in speech_inputs]
return speech_inputs
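# e.g. (ours): with min_seq_length=400, max_seq_length=2000 and batch_size=7,
# `seq_length_diff` is (2000 - 400) // 6 = 266, so the unequal-length inputs
# step through lengths 400, 666, 932, ... up to just under 2000.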
@require_torch
@require_torchaudio
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = TvltFeatureExtractor
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = TvltFeatureExtractionTester(self )
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""spectrogram_length""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""feature_size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""num_audio_channels""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""hop_length""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""chunk_length""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""sampling_rate""" ) )
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = feat_extract_first.save_pretrained(lowerCamelCase )[0]
check_json_file_has_correct_format(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_pretrained(lowerCamelCase )
__SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
__SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
__SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
__SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase ,lowerCamelCase ) )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
__SCREAMING_SNAKE_CASE = os.path.join(lowerCamelCase ,"""feat_extract.json""" )
feat_extract_first.to_json_file(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.feature_extraction_class.from_json_file(lowerCamelCase )
__SCREAMING_SNAKE_CASE = feat_extract_first.to_dict()
__SCREAMING_SNAKE_CASE = feat_extract_second.to_dict()
__SCREAMING_SNAKE_CASE = dict_first.pop("""mel_filters""" )
__SCREAMING_SNAKE_CASE = dict_second.pop("""mel_filters""" )
self.assertTrue(np.allclose(lowerCamelCase ,lowerCamelCase ) )
self.assertEqual(lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.feature_extraction_class(**self.feat_extract_dict )
# create three inputs of length 800, 1000, and 1200
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in range(800 ,1400 ,200 )]
__SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase ) for speech_input in speech_inputs]
# Test not batched input
__SCREAMING_SNAKE_CASE = feature_extractor(np_speech_inputs[0] ,return_tensors="""np""" ,sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test batched
__SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase ,return_tensors="""np""" ,sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test audio masking
__SCREAMING_SNAKE_CASE = feature_extractor(
lowerCamelCase ,return_tensors="""np""" ,sampling_rate=4_4100 ,mask_audio=lowerCamelCase ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
# Test 2-D numpy arrays are batched.
__SCREAMING_SNAKE_CASE = [floats_list((1, x) )[0] for x in (800, 800, 800)]
__SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase )
__SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase ,return_tensors="""np""" ,sampling_rate=4_4100 ).audio_values
self.assertTrue(encoded_audios.ndim == 4 )
self.assertTrue(encoded_audios.shape[-1] == feature_extractor.feature_size )
self.assertTrue(encoded_audios.shape[-2] <= feature_extractor.spectrogram_length )
self.assertTrue(encoded_audios.shape[-3] == feature_extractor.num_channels )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = load_dataset("""hf-internal-testing/librispeech_asr_dummy""" ,"""clean""" ,split="""validation""" )
# automatic decoding with librispeech
__SCREAMING_SNAKE_CASE = ds.sort("""id""" ).select(range(lowerCamelCase ) )[:num_samples]["""audio"""]
return [x["array"] for x in speech_samples]
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self._load_datasamples(1 )
__SCREAMING_SNAKE_CASE = TvltFeatureExtractor()
__SCREAMING_SNAKE_CASE = feature_extractor(lowerCamelCase ,return_tensors="""pt""" ).audio_values
self.assertEqual(audio_values.shape ,(1, 1, 192, 128) )
__SCREAMING_SNAKE_CASE = torch.tensor([[-0.3_032, -0.2_708], [-0.4_434, -0.4_007]] )
self.assertTrue(torch.allclose(audio_values[0, 0, :2, :2] ,lowerCamelCase ,atol=1E-4 ) )
| 715 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from transformers import ViTConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor
if is_flax_available():
import jax
from transformers.models.vit.modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel
class __a ( unittest.TestCase ):
def __init__( self : Optional[int] ,lowerCamelCase : str ,lowerCamelCase : List[str]=13 ,lowerCamelCase : Optional[Any]=30 ,lowerCamelCase : Dict=2 ,lowerCamelCase : List[Any]=3 ,lowerCamelCase : List[str]=True ,lowerCamelCase : str=True ,lowerCamelCase : Optional[int]=32 ,lowerCamelCase : Dict=5 ,lowerCamelCase : Optional[int]=4 ,lowerCamelCase : List[Any]=37 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Any=0.1 ,lowerCamelCase : str=10 ,lowerCamelCase : Dict=0.02 ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = patch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = is_training
__SCREAMING_SNAKE_CASE = use_labels
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = hidden_dropout_prob
__SCREAMING_SNAKE_CASE = attention_probs_dropout_prob
__SCREAMING_SNAKE_CASE = type_sequence_label_size
__SCREAMING_SNAKE_CASE = initializer_range
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (image_size // patch_size) ** 2
__SCREAMING_SNAKE_CASE = num_patches + 1
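# e.g. (ours): with the defaults above, (30 // 2) ** 2 = 225 patches, so the
# sequence length seen by the model is 226 once the [CLS] token is added.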
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = ViTConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=lowerCamelCase ,initializer_range=self.initializer_range ,)
return config, pixel_values
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : int ,lowerCamelCase : Optional[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModel(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
# expected sequence length = num_patches + 1 (we add 1 for the [CLS] token)
__SCREAMING_SNAKE_CASE = (self.image_size, self.image_size)
__SCREAMING_SNAKE_CASE = (self.patch_size, self.patch_size)
__SCREAMING_SNAKE_CASE = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, num_patches + 1, self.hidden_size) )
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.type_sequence_label_size
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(config=lowerCamelCase )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
# test greyscale images
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = FlaxViTForImageClassification(lowerCamelCase )
__SCREAMING_SNAKE_CASE = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
__SCREAMING_SNAKE_CASE = model(lowerCamelCase )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.prepare_config_and_inputs()
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = config_and_inputs
__SCREAMING_SNAKE_CASE = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_flax
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Any = (FlaxViTModel, FlaxViTForImageClassification) if is_flax_available() else ()
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = FlaxViTModelTester(self )
__SCREAMING_SNAKE_CASE = ConfigTester(self ,config_class=lowerCamelCase ,has_text_modality=lowerCamelCase ,hidden_size=37 )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
self.config_tester.run_common_tests()
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCamelCase )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
__SCREAMING_SNAKE_CASE = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
__SCREAMING_SNAKE_CASE = [*signature.parameters.keys()]
__SCREAMING_SNAKE_CASE = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
__SCREAMING_SNAKE_CASE = self._prepare_for_class(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = model_class(lowerCamelCase )
@jax.jit
def model_jitted(lowerCamelCase : int ,**lowerCamelCase : Union[str, Any] ):
return model(pixel_values=lowerCamelCase ,**lowerCamelCase )
with self.subTest("""JIT Enabled""" ):
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
__SCREAMING_SNAKE_CASE = model_jitted(**lowerCamelCase ).to_tuple()
self.assertEqual(len(lowerCamelCase ) ,len(lowerCamelCase ) )
for jitted_output, output in zip(lowerCamelCase ,lowerCamelCase ):
self.assertEqual(jitted_output.shape ,output.shape )
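# Note (ours): `jax.disable_jit()` makes the same traced function run
# eagerly, so comparing output shapes across the two runs cheaply catches
# tracing and shape bugs.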
@slow
def UpperCAmelCase__ ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
__SCREAMING_SNAKE_CASE = model_class_name.from_pretrained("""google/vit-base-patch16-224""" )
__SCREAMING_SNAKE_CASE = model(np.ones((1, 3, 224, 224) ) )
self.assertIsNotNone(lowerCamelCase )
| 13 | 0 |
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
# See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class __a ( _snake_case ):
__UpperCamelCase : List[Any] = 'gpt_neo'
__UpperCamelCase : Tuple = ['past_key_values']
__UpperCamelCase : Optional[int] = {'num_attention_heads': 'num_heads', 'num_hidden_layers': 'num_layers'}
def __init__( self : List[Any] ,lowerCamelCase : Optional[Any]=5_0257 ,lowerCamelCase : List[str]=2048 ,lowerCamelCase : Optional[int]=2048 ,lowerCamelCase : Optional[Any]=24 ,lowerCamelCase : str=[[["global", "local"], 12]] ,lowerCamelCase : int=16 ,lowerCamelCase : Union[str, Any]=None ,lowerCamelCase : Optional[Any]=256 ,lowerCamelCase : Union[str, Any]="gelu_new" ,lowerCamelCase : List[str]=0.0 ,lowerCamelCase : List[Any]=0.0 ,lowerCamelCase : int=0.0 ,lowerCamelCase : List[str]=0.1 ,lowerCamelCase : Tuple=1E-5 ,lowerCamelCase : Optional[int]=0.02 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Dict=5_0256 ,lowerCamelCase : Tuple=5_0256 ,**lowerCamelCase : Optional[Any] ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : List[str] = vocab_size
__SCREAMING_SNAKE_CASE : List[str] = max_position_embeddings
__SCREAMING_SNAKE_CASE : Any = hidden_size
__SCREAMING_SNAKE_CASE : List[str] = num_layers
__SCREAMING_SNAKE_CASE : Optional[Any] = num_heads
__SCREAMING_SNAKE_CASE : Tuple = intermediate_size
__SCREAMING_SNAKE_CASE : str = window_size
__SCREAMING_SNAKE_CASE : str = activation_function
__SCREAMING_SNAKE_CASE : Dict = resid_dropout
__SCREAMING_SNAKE_CASE : Dict = embed_dropout
__SCREAMING_SNAKE_CASE : Dict = attention_dropout
__SCREAMING_SNAKE_CASE : Optional[int] = classifier_dropout
__SCREAMING_SNAKE_CASE : List[str] = layer_norm_epsilon
__SCREAMING_SNAKE_CASE : int = initializer_range
__SCREAMING_SNAKE_CASE : Tuple = use_cache
__SCREAMING_SNAKE_CASE : Dict = bos_token_id
__SCREAMING_SNAKE_CASE : Tuple = eos_token_id
__SCREAMING_SNAKE_CASE : str = attention_types
__SCREAMING_SNAKE_CASE : str = self.expand_attention_types_params(lowerCamelCase )
if len(self.attention_layers ) != self.num_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.attention_layers)` == `config.num_layers` """
f"""but is `len(config.attention_layers) = {len(self.attention_layers )}`, """
f"""`config.num_layers = {self.num_layers}`. """
"""`config.attention_layers` is prepared using `config.attention_types`. """
"""Please verify the value of `config.attention_types` argument.""" )
super().__init__(bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase ,**lowerCamelCase )
@staticmethod
def UpperCAmelCase__ ( lowerCamelCase : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = []
for item in attention_types:
for _ in range(item[1] ):
attentions.extend(item[0] )
return attentions
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
import torch
__SCREAMING_SNAKE_CASE : Optional[Any] = input.size()
__SCREAMING_SNAKE_CASE : Optional[Any] = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Tuple = shape[dimension]
__SCREAMING_SNAKE_CASE : List[str] = torch.arange(0 , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Tuple = torch.div(sizedim - size , __UpperCAmelCase , rounding_mode="""floor""" ) + 1
__SCREAMING_SNAKE_CASE : List[str] = torch.arange(__UpperCAmelCase ) + low_indices[:min_length][:, None]
    __SCREAMING_SNAKE_CASE : int = [slice(None )] * rank
__SCREAMING_SNAKE_CASE : Dict = indices
__SCREAMING_SNAKE_CASE : List[str] = input[s]
__SCREAMING_SNAKE_CASE : Optional[Any] = list(range(0 , rank + 1 ) )
perm.append(perm.pop(dimension + 1 ) )
return sliced.permute(__UpperCAmelCase )
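# Illustrative check (hedged; this helper is assumed to mirror torch.Tensor.unfold
# for ONNX export, with positional arguments (input, dimension, size, step)):
# unfolding torch.arange(6).reshape(1, 6) along dimension 1 with size=2 and step=2
# yields tensor([[[0, 1], [2, 3], [4, 5]]]) of shape (1, 3, 2).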
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
import torch
__SCREAMING_SNAKE_CASE : Optional[int] = torch.arange(1 , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE : Tuple = torch.remainder(__UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE : str = remainders == 0
__SCREAMING_SNAKE_CASE : str = candidates[divisor_indices]
__SCREAMING_SNAKE_CASE : Union[str, Any] = torch.max(__UpperCAmelCase )
return largest_divisor, torch.div(__UpperCAmelCase , __UpperCAmelCase , rounding_mode="""floor""" )
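# Worked example (hedged; arguments assumed to be (seq_length, window_size)):
# for seq_length=10 and window_size=4 the candidates are [1, 2, 3], of which
# 1 and 2 divide 10, so the function returns (tensor(2), tensor(5)),
# i.e. a block length of 2 split into 5 blocks.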
class __a ( _snake_case ):
@property
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : int = OrderedDict({"""input_ids""": {0: """batch""", 1: """sequence"""}} )
if self.use_past:
self.fill_with_past_key_values_(lowerCamelCase ,direction="""inputs""" )
__SCREAMING_SNAKE_CASE : List[str] = {0: """batch""", 1: """past_sequence + sequence"""}
else:
__SCREAMING_SNAKE_CASE : Optional[int] = {0: """batch""", 1: """sequence"""}
return common_inputs
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self._config.num_heads
def UpperCAmelCase__ ( self : str ,lowerCamelCase : PreTrainedTokenizer ,lowerCamelCase : int = -1 ,lowerCamelCase : int = -1 ,lowerCamelCase : bool = False ,lowerCamelCase : Optional[TensorType] = None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE : Any = super(lowerCamelCase ,self ).generate_dummy_inputs(
lowerCamelCase ,batch_size=lowerCamelCase ,seq_length=lowerCamelCase ,is_pair=lowerCamelCase ,framework=lowerCamelCase )
        # We need to order the inputs in the way they appear in the forward()
__SCREAMING_SNAKE_CASE : Any = OrderedDict({"""input_ids""": common_inputs["""input_ids"""]} )
# Need to add the past_keys
if self.use_past:
if not is_torch_available():
raise ValueError("""Cannot generate dummy past_keys inputs without PyTorch installed.""" )
else:
import torch
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE : Tuple = common_inputs["""input_ids"""].shape
# Not using the same length for past_key_values
__SCREAMING_SNAKE_CASE : Any = seqlen + 2
__SCREAMING_SNAKE_CASE : int = (
batch,
self.num_attention_heads,
past_key_values_length,
self._config.hidden_size // self.num_attention_heads,
)
__SCREAMING_SNAKE_CASE : Tuple = [
(torch.zeros(lowerCamelCase ), torch.zeros(lowerCamelCase )) for _ in range(self.num_layers )
]
__SCREAMING_SNAKE_CASE : Dict = common_inputs["""attention_mask"""]
if self.use_past:
__SCREAMING_SNAKE_CASE : Optional[Any] = ordered_inputs["""attention_mask"""].dtype
__SCREAMING_SNAKE_CASE : Optional[int] = torch.cat(
[ordered_inputs["""attention_mask"""], torch.ones(lowerCamelCase ,lowerCamelCase ,dtype=lowerCamelCase )] ,dim=1 )
return ordered_inputs
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return 13
| 716 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a = logging.get_logger(__name__)
a = {
"asapp/sew-tiny-100k": "https://huggingface.co/asapp/sew-tiny-100k/resolve/main/config.json",
# See all SEW models at https://huggingface.co/models?filter=sew
}
class __a ( _snake_case ):
__UpperCamelCase : Tuple = 'sew'
def __init__( self : str ,lowerCamelCase : Any=32 ,lowerCamelCase : str=768 ,lowerCamelCase : str=12 ,lowerCamelCase : Union[str, Any]=12 ,lowerCamelCase : Union[str, Any]=3072 ,lowerCamelCase : int=2 ,lowerCamelCase : Union[str, Any]="gelu" ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Optional[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.1 ,lowerCamelCase : Optional[Any]=0.02 ,lowerCamelCase : List[str]=1E-5 ,lowerCamelCase : Tuple="group" ,lowerCamelCase : Optional[Any]="gelu" ,lowerCamelCase : List[str]=(64, 128, 128, 128, 128, 256, 256, 256, 256, 512, 512, 512, 512) ,lowerCamelCase : Any=(5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1) ,lowerCamelCase : Dict=(10, 3, 1, 3, 1, 3, 1, 3, 1, 2, 1, 2, 1) ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : Dict=128 ,lowerCamelCase : Union[str, Any]=16 ,lowerCamelCase : List[Any]=True ,lowerCamelCase : List[Any]=0.05 ,lowerCamelCase : Optional[int]=10 ,lowerCamelCase : Any=2 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Tuple=10 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple="mean" ,lowerCamelCase : int=False ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=256 ,lowerCamelCase : str=0 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : Tuple=2 ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(**lowerCamelCase ,pad_token_id=lowerCamelCase ,bos_token_id=lowerCamelCase ,eos_token_id=lowerCamelCase )
__SCREAMING_SNAKE_CASE = hidden_size
__SCREAMING_SNAKE_CASE = feat_extract_norm
__SCREAMING_SNAKE_CASE = feat_extract_activation
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = list(lowerCamelCase )
__SCREAMING_SNAKE_CASE = conv_bias
__SCREAMING_SNAKE_CASE = num_conv_pos_embeddings
__SCREAMING_SNAKE_CASE = num_conv_pos_embedding_groups
__SCREAMING_SNAKE_CASE = len(self.conv_dim )
__SCREAMING_SNAKE_CASE = num_hidden_layers
__SCREAMING_SNAKE_CASE = intermediate_size
__SCREAMING_SNAKE_CASE = squeeze_factor
__SCREAMING_SNAKE_CASE = hidden_act
__SCREAMING_SNAKE_CASE = num_attention_heads
__SCREAMING_SNAKE_CASE = hidden_dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = feat_proj_dropout
__SCREAMING_SNAKE_CASE = final_dropout
__SCREAMING_SNAKE_CASE = layerdrop
__SCREAMING_SNAKE_CASE = layer_norm_eps
__SCREAMING_SNAKE_CASE = initializer_range
__SCREAMING_SNAKE_CASE = vocab_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect."""
"""It is required that `len(config.conv_dim)` == `len(config.conv_stride)` == `len(config.conv_kernel)`,"""
f"""but is `len(config.conv_dim) = {len(self.conv_dim )}`, `len(config.conv_stride)"""
f"""= {len(self.conv_stride )}`, `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__SCREAMING_SNAKE_CASE = apply_spec_augment
__SCREAMING_SNAKE_CASE = mask_time_prob
__SCREAMING_SNAKE_CASE = mask_time_length
__SCREAMING_SNAKE_CASE = mask_time_min_masks
__SCREAMING_SNAKE_CASE = mask_feature_prob
__SCREAMING_SNAKE_CASE = mask_feature_length
__SCREAMING_SNAKE_CASE = mask_feature_min_masks
# ctc loss
__SCREAMING_SNAKE_CASE = ctc_loss_reduction
__SCREAMING_SNAKE_CASE = ctc_zero_infinity
# sequence classification
__SCREAMING_SNAKE_CASE = use_weighted_layer_sum
__SCREAMING_SNAKE_CASE = classifier_proj_size
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return functools.reduce(operator.mul ,self.conv_stride ,1 )
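# Illustrative note (hedged): with the default conv_stride of
# (5, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1), the property above evaluates to
# 5 * 2**6 = 320, i.e. roughly one encoder frame per 320 input samples.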
| 13 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
a = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    a = _LazyModule(__name__, globals()["__file__"], a, module_spec=__spec__)
| 717 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase = 1 , __UpperCAmelCase = 1000 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 0
for divide_by_number in range(__UpperCAmelCase , digit + 1 ):
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = numerator
for _ in range(1 , digit + 1 ):
if now_divide in has_been_divided:
if longest_list_length < len(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = divide_by_number
else:
has_been_divided.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = now_divide * 10 % divide_by_number
return the_digit
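# Example (hedged doctest-style check of the intended behavior; `solution` is
# the assumed original name of the function above):
# >>> solution(1, 10)
# 7
# since 1/7 = 0.(142857) has the longest recurring cycle (6 digits) among
# denominators below 10.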
# Tests
if __name__ == "__main__":
import doctest
doctest.testmod()
| 13 | 0 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> str:
'''simple docstring'''
if a < 0 or b < 0:
raise ValueError("""the value of both inputs must be positive""" )
__SCREAMING_SNAKE_CASE = str(bin(__UpperCAmelCase ) )[2:] # remove the leading "0b"
__SCREAMING_SNAKE_CASE = str(bin(__UpperCAmelCase ) )[2:] # remove the leading "0b"
__SCREAMING_SNAKE_CASE = max(len(__UpperCAmelCase ) , len(__UpperCAmelCase ) )
return "0b" + "".join(
str(int(char_a == """1""" and char_b == """1""" ) )
for char_a, char_b in zip(a_binary.zfill(__UpperCAmelCase ) , b_binary.zfill(__UpperCAmelCase ) ) )
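# Example (hedged; `binary_and` is the assumed original name of the function
# above): bin(25) == "0b11001" and bin(32) == "0b100000", so
# >>> binary_and(25, 32)
# '0b000000'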
if __name__ == "__main__":
import doctest
doctest.testmod()
| 718 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor
class __a ( unittest.TestCase ):
def __init__( self : List[Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str]=7 ,lowerCamelCase : List[str]=3 ,lowerCamelCase : List[str]=18 ,lowerCamelCase : Any=30 ,lowerCamelCase : Optional[Any]=400 ,lowerCamelCase : Optional[Any]=True ,lowerCamelCase : Optional[Any]=None ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=None ,lowerCamelCase : str=True ,lowerCamelCase : Dict=[0.48_145_466, 0.4_578_275, 0.40_821_073] ,lowerCamelCase : List[str]=[0.26_862_954, 0.26_130_258, 0.27_577_711] ,lowerCamelCase : Tuple=True ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = size if size is not None else {"""height""": 224, """width""": 224}
__SCREAMING_SNAKE_CASE = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
__SCREAMING_SNAKE_CASE = parent
__SCREAMING_SNAKE_CASE = batch_size
__SCREAMING_SNAKE_CASE = num_channels
__SCREAMING_SNAKE_CASE = image_size
__SCREAMING_SNAKE_CASE = min_resolution
__SCREAMING_SNAKE_CASE = max_resolution
__SCREAMING_SNAKE_CASE = do_resize
__SCREAMING_SNAKE_CASE = size
__SCREAMING_SNAKE_CASE = do_center_crop
__SCREAMING_SNAKE_CASE = crop_size
__SCREAMING_SNAKE_CASE = do_normalize
__SCREAMING_SNAKE_CASE = image_mean
__SCREAMING_SNAKE_CASE = image_std
__SCREAMING_SNAKE_CASE = do_convert_rgb
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_convert_rgb": self.do_convert_rgb,
}
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Union[str, Any]=False ,lowerCamelCase : str=False ,lowerCamelCase : str=False ):
'''simple docstring'''
assert not (numpify and torchify), "You cannot specify both numpy and PyTorch tensors at the same time"
if equal_resolution:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
image_inputs.append(
np.random.randint(
255 ,size=(self.num_channels, self.max_resolution, self.max_resolution) ,dtype=np.uinta ) )
else:
__SCREAMING_SNAKE_CASE = []
for i in range(self.batch_size ):
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = np.random.choice(np.arange(self.min_resolution ,self.max_resolution ) ,2 )
image_inputs.append(np.random.randint(255 ,size=(self.num_channels, width, height) ,dtype=np.uinta ) )
if not numpify and not torchify:
# PIL expects the channel dimension as last dimension
__SCREAMING_SNAKE_CASE = [Image.fromarray(np.moveaxis(lowerCamelCase ,0 ,-1 ) ) for x in image_inputs]
if torchify:
__SCREAMING_SNAKE_CASE = [torch.from_numpy(lowerCamelCase ) for x in image_inputs]
return image_inputs
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : int = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,do_center_crop=lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{"""height""": 224, """width""": 224} )
self.assertEqual(image_processor.crop_size ,{"""height""": 18, """width""": 18} )
__SCREAMING_SNAKE_CASE = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 ,crop_size=84 )
self.assertEqual(image_processor.size ,{"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size ,{"""height""": 84, """width""": 84} )
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,numpify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,np.ndarray )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
def UpperCAmelCase__ ( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase ,torchify=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,torch.Tensor )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
@require_torch
@require_vision
class __a ( _snake_case, unittest.TestCase ):
__UpperCamelCase : Optional[int] = ChineseCLIPImageProcessor if is_vision_available() else None
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = ChineseCLIPImageProcessingTester(self ,num_channels=4 ,do_center_crop=lowerCamelCase )
__SCREAMING_SNAKE_CASE = 3
@property
def UpperCAmelCase__ ( self : Dict ):
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCamelCase ,"""do_resize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""size""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""center_crop""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_normalize""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_mean""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""image_std""" ) )
self.assertTrue(hasattr(lowerCamelCase ,"""do_convert_rgb""" ) )
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
pass
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
__SCREAMING_SNAKE_CASE = self.image_processor_tester.prepare_inputs(equal_resolution=lowerCamelCase )
for image in image_inputs:
self.assertIsInstance(lowerCamelCase ,Image.Image )
# Test not batched input
__SCREAMING_SNAKE_CASE = image_processing(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
# Test batched
__SCREAMING_SNAKE_CASE = image_processing(lowerCamelCase ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.expected_encoded_image_num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) ,)
| 13 | 0 |
'''simple docstring'''
def __magic_name__ ( ) -> int:
'''simple docstring'''
return 1
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase = 200 ) -> int:
'''simple docstring'''
return two_pound(__UpperCAmelCase )
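# Worked check (hedged, using the helper names kept in the call sites above):
# five_pence(5) == 4, matching the four ways to make 5p from {1p, 2p, 5p};
# solution(200) is expected to return 73682, the published count of ways to
# make 2 pounds from all eight British coins.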
if __name__ == "__main__":
print(solution(int(input().strip())))
| 719 |
'''simple docstring'''
import timeit
import numpy as np
import datasets
from datasets.arrow_writer import ArrowWriter
from datasets.features.features import _ArrayXD
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
def wrapper(*__UpperCAmelCase , **__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = timeit.default_timer()
__SCREAMING_SNAKE_CASE = func(*__UpperCAmelCase , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = timeit.default_timer() - starttime
return delta
    wrapper.__name__ = func.__name__
return wrapper
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = seq_shapes or {}
for i in range(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = {}
for col_id, (k, v) in enumerate(features.items() ):
if isinstance(__UpperCAmelCase , _ArrayXD ):
__SCREAMING_SNAKE_CASE = np.random.rand(*v.shape ).astype(v.dtype )
elif isinstance(__UpperCAmelCase , datasets.Value ):
if v.dtype == "string":
__SCREAMING_SNAKE_CASE = """The small grey turtle was surprisingly fast when challenged."""
else:
__SCREAMING_SNAKE_CASE = np.random.randint(10 , size=1 ).astype(v.dtype ).item()
elif isinstance(__UpperCAmelCase , datasets.Sequence ):
while isinstance(__UpperCAmelCase , datasets.Sequence ):
__SCREAMING_SNAKE_CASE = v.feature
__SCREAMING_SNAKE_CASE = seq_shapes[k]
__SCREAMING_SNAKE_CASE = np.random.rand(*__UpperCAmelCase ).astype(v.dtype )
__SCREAMING_SNAKE_CASE = data
dummy_data.append((i, example) )
return dummy_data
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=100 , __UpperCAmelCase=None ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = generate_examples(__UpperCAmelCase , num_examples=__UpperCAmelCase , seq_shapes=__UpperCAmelCase )
with ArrowWriter(features=__UpperCAmelCase , path=__UpperCAmelCase ) as writer:
for key, record in dummy_data:
__SCREAMING_SNAKE_CASE = features.encode_example(__UpperCAmelCase )
writer.write(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = writer.finalize()
if not num_final_examples == num_examples:
raise ValueError(
f"""Error writing the dataset, wrote {num_final_examples} examples but should have written {num_examples}.""" )
__SCREAMING_SNAKE_CASE = datasets.Dataset.from_file(filename=__UpperCAmelCase , info=datasets.DatasetInfo(features=__UpperCAmelCase ) )
return dataset
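# Usage sketch (hedged; the function name generate_example_dataset and the
# feature names below are assumptions for illustration):
# features = datasets.Features(
#     {"text": datasets.Value("string"), "grid": datasets.Array2D((4, 4), "float32")}
# )
# dataset = generate_example_dataset("/tmp/bench.arrow", features, num_examples=10)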
| 13 | 0 |
'''simple docstring'''
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
a = collections.namedtuple("_Datasets", ["train", "validation", "test"])
# CVDF mirror of http://yann.lecun.com/exdb/mnist/
a = "https://storage.googleapis.com/cvdf-datasets/mnist/"
def __magic_name__ ( __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = numpy.dtype(numpy.uintaa ).newbyteorder(""">""" )
return numpy.frombuffer(bytestream.read(4 ) , dtype=__UpperCAmelCase )[0]
@deprecated(__UpperCAmelCase , """Please use tf.data to implement this functionality.""" )
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=__UpperCAmelCase ) as bytestream:
__SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase )
if magic != 2051:
raise ValueError(
"""Invalid magic number %d in MNIST image file: %s""" % (magic, f.name) )
__SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = bytestream.read(rows * cols * num_images )
__SCREAMING_SNAKE_CASE = numpy.frombuffer(__UpperCAmelCase , dtype=numpy.uinta )
__SCREAMING_SNAKE_CASE = data.reshape(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , 1 )
return data
@deprecated(__UpperCAmelCase , """Please use tf.one_hot on tensors.""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = labels_dense.shape[0]
__SCREAMING_SNAKE_CASE = numpy.arange(__UpperCAmelCase ) * num_classes
__SCREAMING_SNAKE_CASE = numpy.zeros((num_labels, num_classes) )
__SCREAMING_SNAKE_CASE = 1
return labels_one_hot
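# Illustrative example (hedged: in the upstream TensorFlow MNIST helper the
# assignment above reads labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1):
# for labels_dense = [0, 2] and num_classes = 3, the flat positions 0 and 5 are
# set, yielding [[1, 0, 0], [0, 0, 1]].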
@deprecated(__UpperCAmelCase , """Please use tf.data to implement this functionality.""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=10 ) -> Optional[Any]:
'''simple docstring'''
print("""Extracting""" , f.name )
with gzip.GzipFile(fileobj=__UpperCAmelCase ) as bytestream:
__SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase )
if magic != 2049:
raise ValueError(
"""Invalid magic number %d in MNIST label file: %s""" % (magic, f.name) )
__SCREAMING_SNAKE_CASE = _readaa(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = bytestream.read(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = numpy.frombuffer(__UpperCAmelCase , dtype=numpy.uinta )
if one_hot:
return _dense_to_one_hot(__UpperCAmelCase , __UpperCAmelCase )
return labels
class __a :
@deprecated(
lowerCamelCase ,"""Please use alternatives such as official/mnist/_DataSet.py"""
""" from tensorflow/models.""" ,)
def __init__( self : int ,lowerCamelCase : List[str] ,lowerCamelCase : int ,lowerCamelCase : Dict=False ,lowerCamelCase : Optional[int]=False ,lowerCamelCase : List[str]=dtypes.floataa ,lowerCamelCase : Tuple=True ,lowerCamelCase : List[Any]=None ,):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = random_seed.get_seed(lowerCamelCase )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
__SCREAMING_SNAKE_CASE = dtypes.as_dtype(lowerCamelCase ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError("""Invalid image dtype %r, expected uint8 or float32""" % dtype )
if fake_data:
__SCREAMING_SNAKE_CASE = 1_0000
__SCREAMING_SNAKE_CASE = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), f"""images.shape: {images.shape} labels.shape: {labels.shape}"""
__SCREAMING_SNAKE_CASE = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
__SCREAMING_SNAKE_CASE = images.reshape(
images.shape[0] ,images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
__SCREAMING_SNAKE_CASE = images.astype(numpy.floataa )
__SCREAMING_SNAKE_CASE = numpy.multiply(lowerCamelCase ,1.0 / 255.0 )
__SCREAMING_SNAKE_CASE = images
__SCREAMING_SNAKE_CASE = labels
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = 0
@property
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
return self._images
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self._labels
@property
def UpperCAmelCase__ ( self : List[str] ):
'''simple docstring'''
return self._num_examples
@property
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
return self._epochs_completed
def UpperCAmelCase__ ( self : int ,lowerCamelCase : List[str] ,lowerCamelCase : Any=False ,lowerCamelCase : Dict=True ):
'''simple docstring'''
if fake_data:
__SCREAMING_SNAKE_CASE = [1] * 784
__SCREAMING_SNAKE_CASE = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(lowerCamelCase )],
[fake_label for _ in range(lowerCamelCase )],
)
__SCREAMING_SNAKE_CASE = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
__SCREAMING_SNAKE_CASE = numpy.arange(self._num_examples )
numpy.random.shuffle(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.images[perma]
__SCREAMING_SNAKE_CASE = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
__SCREAMING_SNAKE_CASE = self._num_examples - start
__SCREAMING_SNAKE_CASE = self._images[start : self._num_examples]
__SCREAMING_SNAKE_CASE = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
__SCREAMING_SNAKE_CASE = numpy.arange(self._num_examples )
numpy.random.shuffle(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.images[perm]
__SCREAMING_SNAKE_CASE = self.labels[perm]
# Start next epoch
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = batch_size - rest_num_examples
__SCREAMING_SNAKE_CASE = self._index_in_epoch
__SCREAMING_SNAKE_CASE = self._images[start:end]
__SCREAMING_SNAKE_CASE = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) ,axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) ,axis=0 ),
)
else:
self._index_in_epoch += batch_size
__SCREAMING_SNAKE_CASE = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(__UpperCAmelCase , """Please write your own downloading logic.""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
if not gfile.Exists(__UpperCAmelCase ):
gfile.MakeDirs(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = os.path.join(__UpperCAmelCase , __UpperCAmelCase )
if not gfile.Exists(__UpperCAmelCase ):
urllib.request.urlretrieve(__UpperCAmelCase , __UpperCAmelCase ) # noqa: S310
with gfile.GFile(__UpperCAmelCase ) as f:
__SCREAMING_SNAKE_CASE = f.size()
print("""Successfully downloaded""" , __UpperCAmelCase , __UpperCAmelCase , """bytes.""" )
return filepath
@deprecated(
__UpperCAmelCase , """Please use alternatives such as:""" """ tensorflow_datasets.load('mnist')""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=False , __UpperCAmelCase=False , __UpperCAmelCase=dtypes.floataa , __UpperCAmelCase=True , __UpperCAmelCase=5000 , __UpperCAmelCase=None , __UpperCAmelCase=DEFAULT_SOURCE_URL , ) -> int:
'''simple docstring'''
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=__UpperCAmelCase , one_hot=__UpperCAmelCase , dtype=__UpperCAmelCase , seed=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = fake()
__SCREAMING_SNAKE_CASE = fake()
__SCREAMING_SNAKE_CASE = fake()
return _Datasets(train=__UpperCAmelCase , validation=__UpperCAmelCase , test=__UpperCAmelCase )
if not source_url: # empty string check
__SCREAMING_SNAKE_CASE = DEFAULT_SOURCE_URL
__SCREAMING_SNAKE_CASE = """train-images-idx3-ubyte.gz"""
__SCREAMING_SNAKE_CASE = """train-labels-idx1-ubyte.gz"""
__SCREAMING_SNAKE_CASE = """t10k-images-idx3-ubyte.gz"""
__SCREAMING_SNAKE_CASE = """t10k-labels-idx1-ubyte.gz"""
__SCREAMING_SNAKE_CASE = _maybe_download(
__UpperCAmelCase , __UpperCAmelCase , source_url + train_images_file )
with gfile.Open(__UpperCAmelCase , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = _extract_images(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = _maybe_download(
__UpperCAmelCase , __UpperCAmelCase , source_url + train_labels_file )
with gfile.Open(__UpperCAmelCase , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = _extract_labels(__UpperCAmelCase , one_hot=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = _maybe_download(
__UpperCAmelCase , __UpperCAmelCase , source_url + test_images_file )
with gfile.Open(__UpperCAmelCase , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = _extract_images(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = _maybe_download(
__UpperCAmelCase , __UpperCAmelCase , source_url + test_labels_file )
with gfile.Open(__UpperCAmelCase , """rb""" ) as f:
__SCREAMING_SNAKE_CASE = _extract_labels(__UpperCAmelCase , one_hot=__UpperCAmelCase )
if not 0 <= validation_size <= len(__UpperCAmelCase ):
__SCREAMING_SNAKE_CASE = (
"""Validation size should be between 0 and """
f"""{len(__UpperCAmelCase )}. Received: {validation_size}."""
)
raise ValueError(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = train_images[:validation_size]
__SCREAMING_SNAKE_CASE = train_labels[:validation_size]
__SCREAMING_SNAKE_CASE = train_images[validation_size:]
__SCREAMING_SNAKE_CASE = train_labels[validation_size:]
__SCREAMING_SNAKE_CASE = {"""dtype""": dtype, """reshape""": reshape, """seed""": seed}
__SCREAMING_SNAKE_CASE = _DataSet(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = _DataSet(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = _DataSet(__UpperCAmelCase , __UpperCAmelCase , **__UpperCAmelCase )
return _Datasets(train=__UpperCAmelCase , validation=__UpperCAmelCase , test=__UpperCAmelCase )
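# Usage sketch (hedged; read_data_sets is the assumed original name of the
# loader defined above):
# data = read_data_sets("/tmp/mnist_data", one_hot=True, validation_size=5000)
# images, labels = data.train.next_batch(100)  # images: (100, 784), labels: (100, 10)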
| 720 |
'''simple docstring'''
import time
from contextlib import contextmanager
from pathlib import Path
import pytest
import requests
from huggingface_hub.hf_api import HfApi, HfFolder
a = "__DUMMY_TRANSFORMERS_USER__"
a = "Dummy User"
a = "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"
a = "https://hub-ci.huggingface.co"
a = CI_HUB_ENDPOINT + "/datasets/{repo_id}/resolve/{revision}/{path}"
a = CI_HUB_ENDPOINT + "/{repo_id}/resolve/{revision}/{filename}"
a = Path("~/.huggingface/hub_ci_token").expanduser()
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
monkeypatch.setattr(
"""huggingface_hub.file_download.HUGGINGFACE_CO_URL_TEMPLATE""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""datasets.config.HF_ENDPOINT""" , __UpperCAmelCase )
monkeypatch.setattr("""datasets.config.HUB_DATASETS_URL""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
monkeypatch.setattr("""huggingface_hub.hf_api.HfFolder.path_token""" , __UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
HfFolder.save_token(__UpperCAmelCase )
yield
HfFolder.delete_token()
@pytest.fixture(scope="""session""" )
def __magic_name__ ( ) -> Optional[Any]:
'''simple docstring'''
return HfApi(endpoint=__UpperCAmelCase )
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfFolder.get_token()
HfFolder.save_token(__UpperCAmelCase )
yield CI_HUB_USER_TOKEN
if previous_token is not None:
HfFolder.save_token(__UpperCAmelCase )
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> Dict:
'''simple docstring'''
def _cleanup_repo(__UpperCAmelCase ):
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
return _cleanup_repo
@pytest.fixture
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
@contextmanager
def _temporary_repo(__UpperCAmelCase ):
try:
yield repo_id
finally:
cleanup_repo(__UpperCAmelCase )
return _temporary_repo
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data/text_data.txt""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
return hf_private_dataset_repo_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_txt_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Tuple:
'''simple docstring'''
return hf_private_dataset_repo_zipped_txt_data_
@pytest.fixture(scope="""session""" )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""repo_zipped_img_data-{int(time.time() * 1_0e3 )}"""
__SCREAMING_SNAKE_CASE = f"""{CI_HUB_USER}/{repo_name}"""
hf_api.create_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" , private=__UpperCAmelCase )
hf_api.upload_file(
token=__UpperCAmelCase , path_or_fileobj=str(__UpperCAmelCase ) , path_in_repo="""data.zip""" , repo_id=__UpperCAmelCase , repo_type="""dataset""" , )
yield repo_id
try:
hf_api.delete_repo(__UpperCAmelCase , token=__UpperCAmelCase , repo_type="""dataset""" )
except (requests.exceptions.HTTPError, ValueError): # catch http error and token invalid error
pass
@pytest.fixture()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> Dict:
'''simple docstring'''
return hf_private_dataset_repo_zipped_img_data_
| 13 | 0 |
'''simple docstring'''
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
while num > 0:
digit_sum += num % 10
num //= 10
return digit_sum
def __magic_name__ ( __UpperCAmelCase = 100 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = 2
for i in range(2 , max_n + 1 ):
__SCREAMING_SNAKE_CASE = pre_numerator
__SCREAMING_SNAKE_CASE = 2 * i // 3 if i % 3 == 0 else 1
__SCREAMING_SNAKE_CASE = cur_numerator
__SCREAMING_SNAKE_CASE = e_cont * pre_numerator + temp
return sum_digits(__UpperCAmelCase )
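# Worked check (hedged): the 10th convergent of e has numerator 1457, so
# solution(10) == 1 + 4 + 5 + 7 == 17; solution(100) yields 272, the published
# Project Euler 65 answer.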
if __name__ == "__main__":
print(F'''{solution() = }''')
| 721 |
'''simple docstring'''
import copy
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ..auto import CONFIG_MAPPING
a = logging.get_logger(__name__)
a = {
"ut/deta": "https://huggingface.co/ut/deta/resolve/main/config.json",
}
class __a ( _snake_case ):
__UpperCamelCase : Dict = 'deta'
__UpperCamelCase : List[str] = {
'hidden_size': 'd_model',
'num_attention_heads': 'encoder_attention_heads',
}
def __init__( self : Tuple ,lowerCamelCase : List[Any]=None ,lowerCamelCase : Any=900 ,lowerCamelCase : int=2048 ,lowerCamelCase : Any=6 ,lowerCamelCase : Optional[Any]=2048 ,lowerCamelCase : str=8 ,lowerCamelCase : Union[str, Any]=6 ,lowerCamelCase : List[str]=1024 ,lowerCamelCase : int=8 ,lowerCamelCase : Any=0.0 ,lowerCamelCase : Any=True ,lowerCamelCase : Optional[int]="relu" ,lowerCamelCase : int=256 ,lowerCamelCase : Tuple=0.1 ,lowerCamelCase : Optional[Any]=0.0 ,lowerCamelCase : Tuple=0.0 ,lowerCamelCase : List[str]=0.02 ,lowerCamelCase : Any=1.0 ,lowerCamelCase : Optional[int]=True ,lowerCamelCase : int=False ,lowerCamelCase : Optional[Any]="sine" ,lowerCamelCase : Dict=5 ,lowerCamelCase : List[Any]=4 ,lowerCamelCase : Optional[Any]=4 ,lowerCamelCase : Any=True ,lowerCamelCase : int=300 ,lowerCamelCase : Any=True ,lowerCamelCase : Tuple=True ,lowerCamelCase : int=1 ,lowerCamelCase : Tuple=5 ,lowerCamelCase : Union[str, Any]=2 ,lowerCamelCase : Tuple=1 ,lowerCamelCase : int=1 ,lowerCamelCase : str=5 ,lowerCamelCase : Optional[Any]=2 ,lowerCamelCase : List[Any]=0.1 ,lowerCamelCase : Union[str, Any]=0.25 ,**lowerCamelCase : int ,):
'''simple docstring'''
if backbone_config is None:
logger.info("""`backbone_config` is `None`. Initializing the config with the default `ResNet` backbone.""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING["""resnet"""](out_features=["""stage2""", """stage3""", """stage4"""] )
else:
if isinstance(lowerCamelCase ,lowerCamelCase ):
__SCREAMING_SNAKE_CASE = backbone_config.pop("""model_type""" )
__SCREAMING_SNAKE_CASE = CONFIG_MAPPING[backbone_model_type]
__SCREAMING_SNAKE_CASE = config_class.from_dict(lowerCamelCase )
__SCREAMING_SNAKE_CASE = backbone_config
__SCREAMING_SNAKE_CASE = num_queries
__SCREAMING_SNAKE_CASE = max_position_embeddings
__SCREAMING_SNAKE_CASE = d_model
__SCREAMING_SNAKE_CASE = encoder_ffn_dim
__SCREAMING_SNAKE_CASE = encoder_layers
__SCREAMING_SNAKE_CASE = encoder_attention_heads
__SCREAMING_SNAKE_CASE = decoder_ffn_dim
__SCREAMING_SNAKE_CASE = decoder_layers
__SCREAMING_SNAKE_CASE = decoder_attention_heads
__SCREAMING_SNAKE_CASE = dropout
__SCREAMING_SNAKE_CASE = attention_dropout
__SCREAMING_SNAKE_CASE = activation_dropout
__SCREAMING_SNAKE_CASE = activation_function
__SCREAMING_SNAKE_CASE = init_std
__SCREAMING_SNAKE_CASE = init_xavier_std
__SCREAMING_SNAKE_CASE = encoder_layerdrop
__SCREAMING_SNAKE_CASE = auxiliary_loss
__SCREAMING_SNAKE_CASE = position_embedding_type
# deformable attributes
__SCREAMING_SNAKE_CASE = num_feature_levels
__SCREAMING_SNAKE_CASE = encoder_n_points
__SCREAMING_SNAKE_CASE = decoder_n_points
__SCREAMING_SNAKE_CASE = two_stage
__SCREAMING_SNAKE_CASE = two_stage_num_proposals
__SCREAMING_SNAKE_CASE = with_box_refine
__SCREAMING_SNAKE_CASE = assign_first_stage
if two_stage is True and with_box_refine is False:
raise ValueError("""If two_stage is True, with_box_refine must be True.""" )
# Hungarian matcher
__SCREAMING_SNAKE_CASE = class_cost
__SCREAMING_SNAKE_CASE = bbox_cost
__SCREAMING_SNAKE_CASE = giou_cost
# Loss coefficients
__SCREAMING_SNAKE_CASE = mask_loss_coefficient
__SCREAMING_SNAKE_CASE = dice_loss_coefficient
__SCREAMING_SNAKE_CASE = bbox_loss_coefficient
__SCREAMING_SNAKE_CASE = giou_loss_coefficient
__SCREAMING_SNAKE_CASE = eos_coefficient
__SCREAMING_SNAKE_CASE = focal_alpha
super().__init__(is_encoder_decoder=lowerCamelCase ,**lowerCamelCase )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
return self.encoder_attention_heads
@property
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
return self.d_model
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = copy.deepcopy(self.__dict__ )
__SCREAMING_SNAKE_CASE = self.backbone_config.to_dict()
__SCREAMING_SNAKE_CASE = self.__class__.model_type
return output
| 13 | 0 |
from ..utils import DummyObject, requires_backends
class __a ( metaclass=_snake_case ):
__UpperCamelCase : Tuple = ['note_seq']
def __init__( self : Optional[Any] ,*lowerCamelCase : Tuple ,**lowerCamelCase : Any ):
'''simple docstring'''
requires_backends(self ,["""note_seq"""] )
@classmethod
def UpperCAmelCase__ ( cls : Dict ,*lowerCamelCase : Tuple ,**lowerCamelCase : Optional[Any] ):
'''simple docstring'''
requires_backends(cls ,["""note_seq"""] )
@classmethod
def UpperCAmelCase__ ( cls : List[str] ,*lowerCamelCase : Optional[Any] ,**lowerCamelCase : str ):
'''simple docstring'''
requires_backends(cls ,["""note_seq"""] )
| 700 |
'''simple docstring'''
import flax.linen as nn
import jax
import jax.numpy as jnp
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hidden_states.shape
__SCREAMING_SNAKE_CASE = jax.image.resize(
lowerCamelCase ,shape=(batch, height * 2, width * 2, channels) ,method="""nearest""" ,)
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Optional[int] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = nn.Conv(
self.out_channels ,kernel_size=(3, 3) ,strides=(2, 2) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.conv(lowerCamelCase )
return hidden_states
class __a ( nn.Module ):
__UpperCamelCase : int
__UpperCamelCase : int = None
__UpperCamelCase : float = 0.0
__UpperCamelCase : bool = None
__UpperCamelCase : jnp.dtype = jnp.floataa
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.in_channels if self.out_channels is None else self.out_channels
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__SCREAMING_SNAKE_CASE = nn.Dense(lowerCamelCase ,dtype=self.dtype )
__SCREAMING_SNAKE_CASE = nn.GroupNorm(num_groups=32 ,epsilon=1E-5 )
__SCREAMING_SNAKE_CASE = nn.Dropout(self.dropout_prob )
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(3, 3) ,strides=(1, 1) ,padding=((1, 1), (1, 1)) ,dtype=self.dtype ,)
__SCREAMING_SNAKE_CASE = self.in_channels != out_channels if self.use_nin_shortcut is None else self.use_nin_shortcut
__SCREAMING_SNAKE_CASE = None
if use_nin_shortcut:
__SCREAMING_SNAKE_CASE = nn.Conv(
lowerCamelCase ,kernel_size=(1, 1) ,strides=(1, 1) ,padding="""VALID""" ,dtype=self.dtype ,)
def __call__( self : List[str] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Tuple ,lowerCamelCase : Union[str, Any]=True ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = hidden_states
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.time_emb_proj(nn.swish(lowerCamelCase ) )
__SCREAMING_SNAKE_CASE = jnp.expand_dims(jnp.expand_dims(lowerCamelCase ,1 ) ,1 )
__SCREAMING_SNAKE_CASE = hidden_states + temb
__SCREAMING_SNAKE_CASE = self.norma(lowerCamelCase )
__SCREAMING_SNAKE_CASE = nn.swish(lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.dropout(lowerCamelCase ,lowerCamelCase )
__SCREAMING_SNAKE_CASE = self.conva(lowerCamelCase )
if self.conv_shortcut is not None:
__SCREAMING_SNAKE_CASE = self.conv_shortcut(lowerCamelCase )
return hidden_states + residual
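# Shape sketch (hedged; these Flax modules use the NHWC convention):
# upsample:   (B, H, W, C) -> (B, 2H, 2W, out_channels)  nearest resize + 3x3 conv
# downsample: (B, H, W, C) -> (B, H/2, W/2, out_channels) via a stride-2 3x3 conv
# resnet:     (B, H, W, C) -> (B, H, W, out_channels), adding a 1x1 shortcut conv
#             whenever the input and output channel counts differ.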
| 13 | 0 |
'''simple docstring'''
import math
from numpy import inf
from scipy.integrate import quad
def __magic_name__ ( __UpperCAmelCase ) -> float:
'''simple docstring'''
if num <= 0:
raise ValueError("""math domain error""" )
    return quad(__UpperCAmelCase , 0 , inf , args=(__UpperCAmelCase) )[0]
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> float:
'''simple docstring'''
return math.pow(__UpperCAmelCase , z - 1 ) * math.exp(-x )
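# Worked check (hedged): Gamma(5) = 4! = 24, so evaluating the integral above at
# z = 5 should return approximately 24.0, and Gamma(0.5) should be close to
# sqrt(pi) ~ 1.7724.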
if __name__ == "__main__":
from doctest import testmod
testmod()
| 701 |
'''simple docstring'''
import sys
from collections import defaultdict
class __a :
def __init__( self : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : List[Any] ):
'''simple docstring'''
return self.node_position[vertex]
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : str ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = pos
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : Optional[int] ,lowerCamelCase : Union[str, Any] ,lowerCamelCase : List[Any] ,lowerCamelCase : Any ):
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
__SCREAMING_SNAKE_CASE = 2 * start + 1
else:
__SCREAMING_SNAKE_CASE = 2 * start + 2
if heap[smallest_child] < heap[start]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[smallest_child], positions[smallest_child]
                __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = heap[start], positions[start]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = temp, tempa
__SCREAMING_SNAKE_CASE = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,lowerCamelCase )
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,lowerCamelCase ,lowerCamelCase )
def UpperCAmelCase__ ( self : Any ,lowerCamelCase : int ,lowerCamelCase : List[str] ,lowerCamelCase : Optional[Any] ,lowerCamelCase : Tuple ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = position[index]
while index != 0:
__SCREAMING_SNAKE_CASE = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
__SCREAMING_SNAKE_CASE = heap[parent]
__SCREAMING_SNAKE_CASE = position[parent]
self.set_position(position[parent] ,lowerCamelCase )
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,lowerCamelCase )
break
__SCREAMING_SNAKE_CASE = parent
else:
__SCREAMING_SNAKE_CASE = val
__SCREAMING_SNAKE_CASE = temp
self.set_position(lowerCamelCase ,0 )
def UpperCAmelCase__ ( self : Tuple ,lowerCamelCase : List[Any] ,lowerCamelCase : List[str] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = len(lowerCamelCase ) // 2 - 1
for i in range(lowerCamelCase ,-1 ,-1 ):
self.top_to_bottom(lowerCamelCase ,lowerCamelCase ,len(lowerCamelCase ) ,lowerCamelCase )
def UpperCAmelCase__ ( self : int ,lowerCamelCase : Optional[int] ,lowerCamelCase : Dict ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = positions[0]
__SCREAMING_SNAKE_CASE = sys.maxsize
self.top_to_bottom(lowerCamelCase ,0 ,len(lowerCamelCase ) ,lowerCamelCase )
return temp
def __magic_name__ ( __UpperCAmelCase ) -> Optional[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = Heap()
__SCREAMING_SNAKE_CASE = [0] * len(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [-1] * len(__UpperCAmelCase ) # Neighboring Tree Vertex of selected vertex
    # Minimum distance from each explored vertex to the partial tree built so far
__SCREAMING_SNAKE_CASE = [] # Heap of Distance of vertices from their neighboring vertex
__SCREAMING_SNAKE_CASE = []
for vertex in range(len(__UpperCAmelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(__UpperCAmelCase )
heap.node_position.append(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = 1
__SCREAMING_SNAKE_CASE = sys.maxsize
for neighbor, distance in adjacency_list[0]:
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = distance
heap.heapify(__UpperCAmelCase , __UpperCAmelCase )
for _ in range(1 , len(__UpperCAmelCase ) ):
__SCREAMING_SNAKE_CASE = heap.delete_minimum(__UpperCAmelCase , __UpperCAmelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
__SCREAMING_SNAKE_CASE = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(__UpperCAmelCase )]
):
__SCREAMING_SNAKE_CASE = distance
heap.bottom_to_top(
__UpperCAmelCase , heap.get_position(__UpperCAmelCase ) , __UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = vertex
return tree_edges
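# Worked example (hedged): for the undirected weighted edges (0, 1, w=1),
# (1, 2, w=2) and (0, 2, w=3), the search starts from vertex 0 and returns the
# minimum spanning tree edges [(0, 1), (1, 2)] with total weight 3.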
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
a = int(input("Enter number of edges: ").strip())
a = defaultdict(list)
for _ in range(edges_number):
a = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
| 13 | 0 |
'''simple docstring'''
import argparse
import requests
import torch
# pip3 install salesforce-lavis
# I'm actually installing a slightly modified version: pip3 install git+https://github.com/nielsrogge/LAVIS.git@fix_lavis
from lavis.models import load_model_and_preprocess
from PIL import Image
from transformers import (
AutoTokenizer,
BlipaConfig,
BlipaForConditionalGeneration,
BlipaProcessor,
BlipaVisionConfig,
BlipImageProcessor,
OPTConfig,
TaConfig,
)
from transformers.utils.constants import OPENAI_CLIP_MEAN, OPENAI_CLIP_STD
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = """https://storage.googleapis.com/sfr-vision-language-research/LAVIS/assets/merlion.png"""
__SCREAMING_SNAKE_CASE = Image.open(requests.get(__UpperCAmelCase , stream=__UpperCAmelCase ).raw ).convert("""RGB""" )
return image
def __magic_name__ ( __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
# fmt: off
# vision encoder
rename_keys.append(("""visual_encoder.cls_token""", """vision_model.embeddings.class_embedding""") )
rename_keys.append(("""visual_encoder.pos_embed""", """vision_model.embeddings.position_embedding""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.weight""", """vision_model.embeddings.patch_embedding.weight""") )
rename_keys.append(("""visual_encoder.patch_embed.proj.bias""", """vision_model.embeddings.patch_embedding.bias""") )
rename_keys.append(("""ln_vision.weight""", """vision_model.post_layernorm.weight""") )
rename_keys.append(("""ln_vision.bias""", """vision_model.post_layernorm.bias""") )
for i in range(config.vision_config.num_hidden_layers ):
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.weight""", f"""vision_model.encoder.layers.{i}.layer_norm1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm1.bias""", f"""vision_model.encoder.layers.{i}.layer_norm1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.weight""", f"""vision_model.encoder.layers.{i}.layer_norm2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.norm2.bias""", f"""vision_model.encoder.layers.{i}.layer_norm2.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.qkv.weight""", f"""vision_model.encoder.layers.{i}.self_attn.qkv.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.weight""", f"""vision_model.encoder.layers.{i}.self_attn.projection.weight""",) )
rename_keys.append((f"""visual_encoder.blocks.{i}.attn.proj.bias""", f"""vision_model.encoder.layers.{i}.self_attn.projection.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc1.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc1.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc1.bias""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.weight""", f"""vision_model.encoder.layers.{i}.mlp.fc2.weight""") )
rename_keys.append((f"""visual_encoder.blocks.{i}.mlp.fc2.bias""", f"""vision_model.encoder.layers.{i}.mlp.fc2.bias""") )
# QFormer
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.weight""", """qformer.layernorm.weight""") )
rename_keys.append(("""Qformer.bert.embeddings.LayerNorm.bias""", """qformer.layernorm.bias""") )
# fmt: on
return rename_keys
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> List[Any]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = dct.pop(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = val
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Union[str, Any]:
'''simple docstring'''
for i in range(config.vision_config.num_hidden_layers ):
# read in original q and v biases
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.q_bias""" )
__SCREAMING_SNAKE_CASE = state_dict.pop(f"""visual_encoder.blocks.{i}.attn.v_bias""" )
# next, set bias in the state dict
__SCREAMING_SNAKE_CASE = torch.cat((q_bias, torch.zeros_like(__UpperCAmelCase , requires_grad=__UpperCAmelCase ), v_bias) )
__SCREAMING_SNAKE_CASE = qkv_bias
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> Any:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 364 if """coco""" in model_name else 224
__SCREAMING_SNAKE_CASE = BlipaVisionConfig(image_size=__UpperCAmelCase ).to_dict()
# make sure the models have proper bos_token_id and eos_token_id set (important for generation)
# seems like flan-T5 models don't have bos_token_id properly set?
if "opt-2.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained("""facebook/opt-2.7b""" , eos_token_id=__UpperCAmelCase ).to_dict()
elif "opt-6.7b" in model_name:
__SCREAMING_SNAKE_CASE = OPTConfig.from_pretrained("""facebook/opt-6.7b""" , eos_token_id=__UpperCAmelCase ).to_dict()
elif "t5-xl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained("""google/flan-t5-xl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
elif "t5-xxl" in model_name:
__SCREAMING_SNAKE_CASE = TaConfig.from_pretrained("""google/flan-t5-xxl""" , dense_act_fn="""gelu""" , bos_token_id=1 ).to_dict()
__SCREAMING_SNAKE_CASE = BlipaConfig(vision_config=__UpperCAmelCase , text_config=__UpperCAmelCase )
return config, image_size
@torch.no_grad()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase=None , __UpperCAmelCase=False ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = (
AutoTokenizer.from_pretrained("""facebook/opt-2.7b""" )
if """opt""" in model_name
else AutoTokenizer.from_pretrained("""google/flan-t5-xl""" )
)
__SCREAMING_SNAKE_CASE = tokenizer("""\n""" , add_special_tokens=__UpperCAmelCase ).input_ids[0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_blipa_config(__UpperCAmelCase , eos_token_id=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = BlipaForConditionalGeneration(__UpperCAmelCase ).eval()
__SCREAMING_SNAKE_CASE = {
"""blip2-opt-2.7b""": ("""blip2_opt""", """pretrain_opt2.7b"""),
"""blip2-opt-6.7b""": ("""blip2_opt""", """pretrain_opt6.7b"""),
"""blip2-opt-2.7b-coco""": ("""blip2_opt""", """caption_coco_opt2.7b"""),
"""blip2-opt-6.7b-coco""": ("""blip2_opt""", """caption_coco_opt6.7b"""),
"""blip2-flan-t5-xl""": ("""blip2_t5""", """pretrain_flant5xl"""),
"""blip2-flan-t5-xl-coco""": ("""blip2_t5""", """caption_coco_flant5xl"""),
"""blip2-flan-t5-xxl""": ("""blip2_t5""", """pretrain_flant5xxl"""),
}
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = model_name_to_original[model_name]
# load original model
print("""Loading original model...""" )
__SCREAMING_SNAKE_CASE = """cuda""" if torch.cuda.is_available() else """cpu"""
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = load_model_and_preprocess(
name=__UpperCAmelCase , model_type=__UpperCAmelCase , is_eval=__UpperCAmelCase , device=__UpperCAmelCase )
original_model.eval()
print("""Done!""" )
# update state dict keys
__SCREAMING_SNAKE_CASE = original_model.state_dict()
__SCREAMING_SNAKE_CASE = create_rename_keys(__UpperCAmelCase )
for src, dest in rename_keys:
rename_key(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
# some keys can be renamed efficiently
for key, val in state_dict.copy().items():
__SCREAMING_SNAKE_CASE = state_dict.pop(__UpperCAmelCase )
if key.startswith("""Qformer.bert""" ):
__SCREAMING_SNAKE_CASE = key.replace("""Qformer.bert""" , """qformer""" )
if "attention.self" in key:
__SCREAMING_SNAKE_CASE = key.replace("""self""" , """attention""" )
if "opt_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace("""opt_proj""" , """language_projection""" )
if "t5_proj" in key:
__SCREAMING_SNAKE_CASE = key.replace("""t5_proj""" , """language_projection""" )
if key.startswith("""opt""" ):
__SCREAMING_SNAKE_CASE = key.replace("""opt""" , """language""" )
if key.startswith("""t5""" ):
__SCREAMING_SNAKE_CASE = key.replace("""t5""" , """language""" )
__SCREAMING_SNAKE_CASE = val
# read in qv biases
read_in_q_v_bias(__UpperCAmelCase , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = hf_model.load_state_dict(__UpperCAmelCase , strict=__UpperCAmelCase )
assert len(__UpperCAmelCase ) == 0
assert unexpected_keys == ["qformer.embeddings.position_ids"]
__SCREAMING_SNAKE_CASE = load_demo_image()
__SCREAMING_SNAKE_CASE = vis_processors["""eval"""](__UpperCAmelCase ).unsqueeze(0 ).to(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = tokenizer(["""\n"""] , return_tensors="""pt""" ).input_ids.to(__UpperCAmelCase )
# create processor
__SCREAMING_SNAKE_CASE = BlipImageProcessor(
size={"""height""": image_size, """width""": image_size} , image_mean=__UpperCAmelCase , image_std=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = BlipaProcessor(image_processor=__UpperCAmelCase , tokenizer=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = processor(images=__UpperCAmelCase , return_tensors="""pt""" ).pixel_values.to(__UpperCAmelCase )
# make sure processor creates exact same pixel values
assert torch.allclose(__UpperCAmelCase , __UpperCAmelCase )
original_model.to(__UpperCAmelCase )
hf_model.to(__UpperCAmelCase )
with torch.no_grad():
if "opt" in model_name:
__SCREAMING_SNAKE_CASE = original_model({"""image""": original_pixel_values, """text_input""": [""""""]} ).logits
__SCREAMING_SNAKE_CASE = hf_model(__UpperCAmelCase , __UpperCAmelCase ).logits
else:
__SCREAMING_SNAKE_CASE = original_model(
{"""image""": original_pixel_values, """text_input""": ["""\n"""], """text_output""": ["""\n"""]} ).logits
__SCREAMING_SNAKE_CASE = input_ids.masked_fill(input_ids == tokenizer.pad_token_id , -100 )
__SCREAMING_SNAKE_CASE = hf_model(__UpperCAmelCase , __UpperCAmelCase , labels=__UpperCAmelCase ).logits
assert original_logits.shape == logits.shape
print("""First values of original logits:""" , original_logits[0, :3, :3] )
print("""First values of HF logits:""" , logits[0, :3, :3] )
# assert values
if model_name == "blip2-flan-t5-xl":
__SCREAMING_SNAKE_CASE = torch.tensor(
            [[-41.5850, -4.4440, -8.9922], [-47.4322, -5.9143, -1.7340]] , device=__UpperCAmelCase )
assert torch.allclose(logits[0, :3, :3] , __UpperCAmelCase , atol=1e-4 )
elif model_name == "blip2-flan-t5-xl-coco":
__SCREAMING_SNAKE_CASE = torch.tensor(
            [[-57.0109, -9.8967, -12.6280], [-68.6578, -12.7191, -10.5065]] , device=__UpperCAmelCase )
else:
# cast to same type
__SCREAMING_SNAKE_CASE = logits.dtype
assert torch.allclose(original_logits.to(__UpperCAmelCase ) , __UpperCAmelCase , atol=1e-2 )
print("""Looks ok!""" )
print("""Generating a caption...""" )
__SCREAMING_SNAKE_CASE = """"""
__SCREAMING_SNAKE_CASE = tokenizer(__UpperCAmelCase , return_tensors="""pt""" ).input_ids.to(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = original_model.generate({"""image""": original_pixel_values} )
__SCREAMING_SNAKE_CASE = hf_model.generate(
__UpperCAmelCase , __UpperCAmelCase , do_sample=__UpperCAmelCase , num_beams=5 , max_length=30 , min_length=1 , top_p=0.9 , repetition_penalty=1.0 , length_penalty=1.0 , temperature=1 , )
print("""Original generation:""" , __UpperCAmelCase )
__SCREAMING_SNAKE_CASE = input_ids.shape[1]
__SCREAMING_SNAKE_CASE = processor.batch_decode(outputs[:, prompt_length:] , skip_special_tokens=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = [text.strip() for text in output_text]
print("""HF generation:""" , __UpperCAmelCase )
if pytorch_dump_folder_path is not None:
processor.save_pretrained(__UpperCAmelCase )
hf_model.save_pretrained(__UpperCAmelCase )
if push_to_hub:
processor.push_to_hub(f"""nielsr/{model_name}""" )
hf_model.push_to_hub(f"""nielsr/{model_name}""" )
if __name__ == "__main__":
a = argparse.ArgumentParser()
a = [
"blip2-opt-2.7b",
"blip2-opt-6.7b",
"blip2-opt-2.7b-coco",
"blip2-opt-6.7b-coco",
"blip2-flan-t5-xl",
"blip2-flan-t5-xl-coco",
"blip2-flan-t5-xxl",
]
parser.add_argument(
"--model_name",
default="blip2-opt-2.7b",
choices=choices,
type=str,
help="Path to hf config.json of model to convert",
)
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether to push the model and processor to the hub after converting",
)
a = parser.parse_args()
convert_blipa_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 702 |
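The conversion script above migrates checkpoint keys with a pop-and-reassign pattern: explicit rename pairs first, then string replacements over a snapshot of the keys. A minimal standalone sketch of that pattern; the tensor shapes and key names here are invented for illustration.

import torch

state_dict = {
    "visual_encoder.cls_token": torch.zeros(1, 1, 8),
    "Qformer.bert.embeddings.LayerNorm.weight": torch.ones(8),
}

# Explicit one-to-one renames: pop the old key, insert under the new one.
rename_pairs = [("visual_encoder.cls_token", "vision_model.embeddings.class_embedding")]
for src, dest in rename_pairs:
    state_dict[dest] = state_dict.pop(src)

# Pattern-based renames: iterate over a snapshot of the keys so mutating
# the dict during the loop is safe, as the state_dict.copy().items()
# loop above does.
for key in list(state_dict):
    new_key = key.replace("Qformer.bert", "qformer")
    if new_key != key:
        state_dict[new_key] = state_dict.pop(key)

print(sorted(state_dict))
# ['qformer.embeddings.LayerNorm.weight', 'vision_model.embeddings.class_embedding']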
'''simple docstring'''
import os
import string
import sys
a = 1 << 8
a = {
"tab": ord("\t"),
"newline": ord("\r"),
"esc": 27,
"up": 65 + ARROW_KEY_FLAG,
"down": 66 + ARROW_KEY_FLAG,
"right": 67 + ARROW_KEY_FLAG,
"left": 68 + ARROW_KEY_FLAG,
"mod_int": 91,
"undefined": sys.maxsize,
"interrupt": 3,
"insert": 50,
"delete": 51,
"pg_up": 53,
"pg_down": 54,
}
a = KEYMAP["up"]
a = KEYMAP["left"]
if sys.platform == "win32":
a = []
a = {
b"\xe0H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\x00H": KEYMAP["up"] - ARROW_KEY_FLAG,
b"\xe0P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\x00P": KEYMAP["down"] - ARROW_KEY_FLAG,
b"\xe0M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\x00M": KEYMAP["right"] - ARROW_KEY_FLAG,
b"\xe0K": KEYMAP["left"] - ARROW_KEY_FLAG,
b"\x00K": KEYMAP["left"] - ARROW_KEY_FLAG,
}
for i in range(10):
a = ord(str(i))
def __magic_name__ ( ) -> Union[str, Any]:
'''simple docstring'''
if os.name == "nt":
import msvcrt
__SCREAMING_SNAKE_CASE = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(__UpperCAmelCase ) == 0:
# Read the keystroke
__SCREAMING_SNAKE_CASE = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
__SCREAMING_SNAKE_CASE = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
__SCREAMING_SNAKE_CASE = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(__UpperCAmelCase )
if ord(__UpperCAmelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(126 ) )
__SCREAMING_SNAKE_CASE = chr(KEYMAP["""esc"""] )
except KeyError:
__SCREAMING_SNAKE_CASE = cha[1]
else:
__SCREAMING_SNAKE_CASE = ch.decode(__UpperCAmelCase )
else:
__SCREAMING_SNAKE_CASE = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
__SCREAMING_SNAKE_CASE = sys.stdin.fileno()
__SCREAMING_SNAKE_CASE = termios.tcgetattr(__UpperCAmelCase )
try:
tty.setraw(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = sys.stdin.read(1 )
finally:
termios.tcsetattr(__UpperCAmelCase , termios.TCSADRAIN , __UpperCAmelCase )
return ch
def __magic_name__ ( ) -> List[str]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(__UpperCAmelCase ) == KEYMAP["esc"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) == KEYMAP["mod_int"]:
__SCREAMING_SNAKE_CASE = get_raw_chars()
if ord(__UpperCAmelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(__UpperCAmelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(__UpperCAmelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"]
| 13 | 0 |
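The keystroke reader above folds arrow keys into a private code range with ARROW_KEY_FLAG (1 << 8) so they can never collide with a plain single-byte keypress. A self-contained sketch of just that encoding trick:

ARROW_KEY_FLAG = 1 << 8  # 256, one bit above any single-byte character

# A terminal arrow key arrives as an escape sequence "\x1b[A".."\x1b[D";
# the final letter (65..68) is offset by the flag to build the arrow codes.
up_code = ord("A") + ARROW_KEY_FLAG
assert up_code == 65 + ARROW_KEY_FLAG        # the "up" entry in the keymap above
assert chr(up_code - ARROW_KEY_FLAG) == "A"  # and the offset round-trips
assert up_code > 255                         # cannot clash with a raw byte
print("up arrow code:", up_code)             # 321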
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
a = {"tokenization_byt5": ["ByT5Tokenizer"]}
if TYPE_CHECKING:
from .tokenization_byta import ByTaTokenizer
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 703 |
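The block above is the standard transformers lazy-init stub: an import structure maps submodules to their exported names, and at runtime the module object is swapped for one that imports a submodule only on first attribute access. A self-contained sketch of the same idea with plain importlib (no transformers dependency; LazyModule here is our own illustrative class, not the library's):

import importlib
import types


class LazyModule(types.ModuleType):
    """Import mapped submodules only when an attribute is first accessed."""

    def __init__(self, name, import_structure):
        super().__init__(name)
        # attribute name -> submodule that actually defines it
        self._attr_to_submodule = {
            attr: sub for sub, attrs in import_structure.items() for attr in attrs
        }

    def __getattr__(self, attr):
        submodule_name = self._attr_to_submodule.get(attr)
        if submodule_name is None:
            raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")
        value = getattr(importlib.import_module(submodule_name), attr)
        setattr(self, attr, value)  # cache so __getattr__ is not hit again
        return value


# "json" stands in for a heavy submodule; nothing is imported until use.
lazy = LazyModule("demo", {"json": ["dumps"]})
print(lazy.dumps({"lazy": True}))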
'''simple docstring'''
from __future__ import annotations
import bisect
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] < item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
if hi < 0:
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase )
while lo < hi:
__SCREAMING_SNAKE_CASE = lo + (hi - lo) // 2
if sorted_collection[mid] <= item:
__SCREAMING_SNAKE_CASE = mid + 1
else:
__SCREAMING_SNAKE_CASE = mid
return lo
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_left(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0 , __UpperCAmelCase = -1 ) -> None:
'''simple docstring'''
sorted_collection.insert(bisect_right(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = 0
__SCREAMING_SNAKE_CASE = len(__UpperCAmelCase ) - 1
while left <= right:
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
__SCREAMING_SNAKE_CASE = sorted_collection[midpoint]
if current_item == item:
return midpoint
elif item < current_item:
__SCREAMING_SNAKE_CASE = midpoint - 1
else:
__SCREAMING_SNAKE_CASE = midpoint + 1
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = bisect.bisect_left(__UpperCAmelCase , __UpperCAmelCase )
if index != len(__UpperCAmelCase ) and sorted_collection[index] == item:
return index
return None
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase ) -> int | None:
'''simple docstring'''
if right < left:
return None
__SCREAMING_SNAKE_CASE = left + (right - left) // 2
if sorted_collection[midpoint] == item:
return midpoint
elif sorted_collection[midpoint] > item:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , midpoint - 1 )
else:
return binary_search_by_recursion(__UpperCAmelCase , __UpperCAmelCase , midpoint + 1 , __UpperCAmelCase )
if __name__ == "__main__":
a = input("Enter numbers separated by comma:\n").strip()
a = sorted(int(item) for item in user_input.split(","))
a = int(input("Enter a single number to be found in the list:\n"))
a = binary_search(collection, target)
if result is None:
print(F'''{target} was not found in {collection}.''')
else:
print(F'''{target} was found at position {result} in {collection}.''')
| 13 | 0 |
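As a quick sanity check on the invariants the block above relies on: bisect_left returns the first index whose element is >= item, bisect_right the first index whose element is > item, so a run of equal elements sits exactly between the two. A runnable sketch with the standard library (the membership test at the end follows the same pattern as the std-lib lookup near the end of the block):

import bisect

data = [1, 3, 3, 3, 7]

left = bisect.bisect_left(data, 3)    # 1: first slot where 3 could go
right = bisect.bisect_right(data, 3)  # 4: slot just past the last 3
assert data[left:right] == [3, 3, 3]

# Membership test via insertion point plus an equality check.
index = bisect.bisect_left(data, 7)
found = index < len(data) and data[index] == 7
print(left, right, found)  # 1 4 True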
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : Any = ['input_values', 'padding_mask']
    def __init__( self : str ,lowerCamelCase : int = 1 ,lowerCamelCase : int = 24000 ,lowerCamelCase : float = 0.0 ,lowerCamelCase : float = None ,lowerCamelCase : float = None ,**lowerCamelCase : Union[str, Any] ,):
'''simple docstring'''
super().__init__(feature_size=lowerCamelCase ,sampling_rate=lowerCamelCase ,padding_value=lowerCamelCase ,**lowerCamelCase )
__SCREAMING_SNAKE_CASE = chunk_length_s
__SCREAMING_SNAKE_CASE = overlap
@property
def UpperCAmelCase__ ( self : Tuple ):
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def UpperCAmelCase__ ( self : Any ):
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 ,int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : Tuple ,lowerCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,lowerCamelCase : Optional[Union[bool, str, PaddingStrategy]] = None ,lowerCamelCase : Optional[bool] = False ,lowerCamelCase : Optional[int] = None ,lowerCamelCase : Optional[Union[str, TensorType]] = None ,lowerCamelCase : Optional[int] = None ,):
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f"""The model corresponding to this feature extractor: {self} was trained using a sampling rate of"""
f""" {self.sampling_rate}. Please make sure that the provided audio input was sampled with"""
f""" {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
if padding and truncation:
raise ValueError("""Both padding and truncation were set. Make sure you only set one.""" )
elif padding is None:
# by default let's pad the inputs
__SCREAMING_SNAKE_CASE = True
__SCREAMING_SNAKE_CASE = bool(
isinstance(lowerCamelCase ,(list, tuple) ) and (isinstance(raw_audio[0] ,(np.ndarray, tuple, list) )) )
if is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase ,dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(lowerCamelCase ,np.ndarray ):
__SCREAMING_SNAKE_CASE = np.asarray(lowerCamelCase ,dtype=np.floataa )
elif isinstance(lowerCamelCase ,np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__SCREAMING_SNAKE_CASE = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__SCREAMING_SNAKE_CASE = [np.asarray(lowerCamelCase ).T]
# verify inputs are valid
for idx, example in enumerate(lowerCamelCase ):
if example.ndim > 2:
raise ValueError(f"""Expected input shape (channels, length) but got shape {example.shape}""" )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(f"""Expected mono audio but example has {example.shape[-1]} channels""" )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(f"""Expected stereo audio but example has {example.shape[-1]} channels""" )
__SCREAMING_SNAKE_CASE = None
__SCREAMING_SNAKE_CASE = BatchFeature({"""input_values""": raw_audio} )
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__SCREAMING_SNAKE_CASE = min(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.floor(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__SCREAMING_SNAKE_CASE = max(array.shape[0] for array in raw_audio )
__SCREAMING_SNAKE_CASE = int(np.ceil(max_length / self.chunk_stride ) )
__SCREAMING_SNAKE_CASE = (nb_step - 1) * self.chunk_stride + self.chunk_length
__SCREAMING_SNAKE_CASE = """max_length"""
else:
__SCREAMING_SNAKE_CASE = input_values
# normal padding on batch
if padded_inputs is None:
__SCREAMING_SNAKE_CASE = self.pad(
lowerCamelCase ,max_length=lowerCamelCase ,truncation=lowerCamelCase ,padding=lowerCamelCase ,return_attention_mask=lowerCamelCase ,)
if padding:
__SCREAMING_SNAKE_CASE = padded_inputs.pop("""attention_mask""" )
__SCREAMING_SNAKE_CASE = []
for example in padded_inputs.pop("""input_values""" ):
if self.feature_size == 1:
__SCREAMING_SNAKE_CASE = example[..., None]
input_values.append(example.T )
__SCREAMING_SNAKE_CASE = input_values
if return_tensors is not None:
__SCREAMING_SNAKE_CASE = padded_inputs.convert_to_tensors(lowerCamelCase )
return padded_inputs
| 704 |
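The feature extractor above derives its framing from two properties: chunk_length = chunk_length_s * sampling_rate and chunk_stride = (1 - overlap) * chunk_length, then truncates the batch to a floor number of strides or pads it to a ceiling number. A small arithmetic sketch of the padding branch; the sample values are illustrative, not taken from the source.

import math

sampling_rate = 24_000
chunk_length_s = 1.0
overlap = 0.25

chunk_length = int(chunk_length_s * sampling_rate)          # 24000
chunk_stride = max(1, int((1.0 - overlap) * chunk_length))  # 18000

num_samples = 50_000  # length of the longest example in the batch

# Padding branch: round the stride count up, then recover the padded length.
nb_step = int(math.ceil(num_samples / chunk_stride))        # 3
padded_length = (nb_step - 1) * chunk_stride + chunk_length # 60000
assert padded_length >= num_samples
print(chunk_length, chunk_stride, padded_length)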
'''simple docstring'''
import math
from enum import Enum
from typing import Optional, Union
from torch.optim import Optimizer
from torch.optim.lr_scheduler import LambdaLR
from .utils import logging
a = logging.get_logger(__name__)
class __a ( _snake_case ):
__UpperCamelCase : int = 'linear'
__UpperCamelCase : Tuple = 'cosine'
__UpperCamelCase : Tuple = 'cosine_with_restarts'
__UpperCamelCase : List[Any] = 'polynomial'
__UpperCamelCase : Optional[Any] = 'constant'
__UpperCamelCase : Optional[int] = 'constant_with_warmup'
__UpperCamelCase : List[Any] = 'piecewise_constant'
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
return LambdaLR(__UpperCAmelCase , lambda __UpperCAmelCase : 1 , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> List[Any]:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1.0 , __UpperCAmelCase ) )
return 1.0
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = -1 ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = step_rules.split(""",""" )
for rule_str in rule_list[:-1]:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = rule_str.split(""":""" )
__SCREAMING_SNAKE_CASE = int(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = float(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = value
__SCREAMING_SNAKE_CASE = float(rule_list[-1] )
def create_rules_function(__UpperCAmelCase , __UpperCAmelCase ):
def rule_func(__UpperCAmelCase ) -> float:
__SCREAMING_SNAKE_CASE = sorted(rules_dict.keys() )
for i, sorted_step in enumerate(__UpperCAmelCase ):
if steps < sorted_step:
return rules_dict[sorted_steps[i]]
return last_lr_multiple
return rule_func
__SCREAMING_SNAKE_CASE = create_rules_function(__UpperCAmelCase , __UpperCAmelCase )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , last_epoch=__UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=-1 ) -> int:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
return max(
0.0 , float(num_training_steps - current_step ) / float(max(1 , num_training_steps - num_warmup_steps ) ) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 0.5 , __UpperCAmelCase = -1 ) -> Dict:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * float(__UpperCAmelCase ) * 2.0 * progress )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = 1 , __UpperCAmelCase = -1 ) -> Tuple:
'''simple docstring'''
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
__SCREAMING_SNAKE_CASE = float(current_step - num_warmup_steps ) / float(max(1 , num_training_steps - num_warmup_steps ) )
if progress >= 1.0:
return 0.0
return max(0.0 , 0.5 * (1.0 + math.cos(math.pi * ((float(__UpperCAmelCase ) * progress) % 1.0) )) )
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase=1e-7 , __UpperCAmelCase=1.0 , __UpperCAmelCase=-1 ) -> Tuple:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = optimizer.defaults["""lr"""]
if not (lr_init > lr_end):
raise ValueError(f"""lr_end ({lr_end}) must be be smaller than initial lr ({lr_init})""" )
def lr_lambda(__UpperCAmelCase ):
if current_step < num_warmup_steps:
return float(__UpperCAmelCase ) / float(max(1 , __UpperCAmelCase ) )
elif current_step > num_training_steps:
return lr_end / lr_init # as LambdaLR multiplies by lr_init
else:
__SCREAMING_SNAKE_CASE = lr_init - lr_end
__SCREAMING_SNAKE_CASE = num_training_steps - num_warmup_steps
__SCREAMING_SNAKE_CASE = 1 - (current_step - num_warmup_steps) / decay_steps
__SCREAMING_SNAKE_CASE = lr_range * pct_remaining**power + lr_end
return decay / lr_init # as LambdaLR multiplies by lr_init
return LambdaLR(__UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase )
a = {
SchedulerType.LINEAR: get_linear_schedule_with_warmup,
SchedulerType.COSINE: get_cosine_schedule_with_warmup,
SchedulerType.COSINE_WITH_RESTARTS: get_cosine_with_hard_restarts_schedule_with_warmup,
SchedulerType.POLYNOMIAL: get_polynomial_decay_schedule_with_warmup,
SchedulerType.CONSTANT: get_constant_schedule,
SchedulerType.CONSTANT_WITH_WARMUP: get_constant_schedule_with_warmup,
SchedulerType.PIECEWISE_CONSTANT: get_piecewise_constant_schedule,
}
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = None , __UpperCAmelCase = 1 , __UpperCAmelCase = 1.0 , __UpperCAmelCase = -1 , ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = SchedulerType(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = TYPE_TO_SCHEDULER_FUNCTION[name]
if name == SchedulerType.CONSTANT:
return schedule_func(__UpperCAmelCase , last_epoch=__UpperCAmelCase )
if name == SchedulerType.PIECEWISE_CONSTANT:
return schedule_func(__UpperCAmelCase , step_rules=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_warmup_steps`
if num_warmup_steps is None:
raise ValueError(f"""{name} requires `num_warmup_steps`, please provide that argument.""" )
if name == SchedulerType.CONSTANT_WITH_WARMUP:
return schedule_func(__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
# All other schedulers require `num_training_steps`
if num_training_steps is None:
raise ValueError(f"""{name} requires `num_training_steps`, please provide that argument.""" )
if name == SchedulerType.COSINE_WITH_RESTARTS:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , num_cycles=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
if name == SchedulerType.POLYNOMIAL:
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , power=__UpperCAmelCase , last_epoch=__UpperCAmelCase , )
return schedule_func(
__UpperCAmelCase , num_warmup_steps=__UpperCAmelCase , num_training_steps=__UpperCAmelCase , last_epoch=__UpperCAmelCase )
| 13 | 0 |
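To see the warmup/decay shape these LambdaLR factories produce, the linear schedule's lambda can be extracted as a standalone function and probed at a few steps. A sketch; linear_schedule is our name for the inner lambda of the linear factory above, and the step values are illustrative.

def linear_schedule(current_step, num_warmup_steps, num_training_steps):
    """LR multiplier: linear ramp over warmup, then linear decay to zero."""
    if current_step < num_warmup_steps:
        return current_step / max(1, num_warmup_steps)
    return max(
        0.0,
        (num_training_steps - current_step)
        / max(1, num_training_steps - num_warmup_steps),
    )


for step in (0, 5, 10, 55, 100):
    print(step, round(linear_schedule(step, 10, 100), 2))
# 0 0.0 | 5 0.5 | 10 1.0 | 55 0.5 | 100 0.0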
'''simple docstring'''
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
a = TypeVar("T")
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (position - 1) // 2
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (2 * position) + 1
def __magic_name__ ( __UpperCAmelCase ) -> int:
'''simple docstring'''
return (2 * position) + 2
class __a ( Generic[T] ):
'''simple docstring'''
def __init__( self : str ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = []
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 0
def __len__( self : Union[str, Any] ):
'''simple docstring'''
return self.elements
def __repr__( self : Dict ):
'''simple docstring'''
return str(self.heap )
def UpperCAmelCase__ ( self : int ):
'''simple docstring'''
return self.elements == 0
def UpperCAmelCase__ ( self : int ,lowerCamelCase : T ,lowerCamelCase : int ):
'''simple docstring'''
self.heap.append((elem, weight) )
__SCREAMING_SNAKE_CASE = self.elements
self.elements += 1
self._bubble_up(lowerCamelCase )
def UpperCAmelCase__ ( self : Union[str, Any] ):
'''simple docstring'''
if self.elements > 1:
self._swap_nodes(0 ,self.elements - 1 )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[0]
self._bubble_down(lowerCamelCase )
return elem
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : T ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.position_map[elem]
__SCREAMING_SNAKE_CASE = (elem, weight)
if position > 0:
__SCREAMING_SNAKE_CASE = get_parent_position(lowerCamelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowerCamelCase )
else:
self._bubble_down(lowerCamelCase )
else:
self._bubble_down(lowerCamelCase )
def UpperCAmelCase__ ( self : Optional[int] ,lowerCamelCase : T ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.position_map[elem]
if curr_pos == 0:
return None
__SCREAMING_SNAKE_CASE = get_parent_position(lowerCamelCase )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowerCamelCase ,lowerCamelCase )
return self._bubble_up(lowerCamelCase )
return None
def UpperCAmelCase__ ( self : int ,lowerCamelCase : T ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.position_map[elem]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[curr_pos]
__SCREAMING_SNAKE_CASE = get_child_left_position(lowerCamelCase )
__SCREAMING_SNAKE_CASE = get_child_right_position(lowerCamelCase )
if child_left_position < self.elements and child_right_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowerCamelCase ,lowerCamelCase )
return self._bubble_down(lowerCamelCase )
if child_left_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowerCamelCase ,lowerCamelCase )
return self._bubble_down(lowerCamelCase )
else:
return None
if child_right_position < self.elements:
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowerCamelCase ,lowerCamelCase )
return self._bubble_down(lowerCamelCase )
return None
def UpperCAmelCase__ ( self : List[Any] ,lowerCamelCase : int ,lowerCamelCase : int ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
__SCREAMING_SNAKE_CASE = self.heap[nodea_pos][0]
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
__SCREAMING_SNAKE_CASE = nodea_pos
__SCREAMING_SNAKE_CASE = nodea_pos
class __a ( Generic[T] ):
'''simple docstring'''
def __init__( self : List[Any] ):
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
__SCREAMING_SNAKE_CASE = 0
def __repr__( self : Optional[int] ):
'''simple docstring'''
return str(self.connections )
def __len__( self : List[str] ):
'''simple docstring'''
return self.nodes
def UpperCAmelCase__ ( self : Union[str, Any] ,lowerCamelCase : T ):
'''simple docstring'''
if node not in self.connections:
__SCREAMING_SNAKE_CASE = {}
self.nodes += 1
def UpperCAmelCase__ ( self : str ,lowerCamelCase : T ,lowerCamelCase : T ,lowerCamelCase : int ):
'''simple docstring'''
self.add_node(lowerCamelCase )
self.add_node(lowerCamelCase )
__SCREAMING_SNAKE_CASE = weight
__SCREAMING_SNAKE_CASE = weight
def __magic_name__ ( __UpperCAmelCase , ) -> tuple[dict[T, int], dict[T, T | None]]:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {node: maxsize for node in graph.connections}
__SCREAMING_SNAKE_CASE = {node: None for node in graph.connections}
__SCREAMING_SNAKE_CASE = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__UpperCAmelCase , __UpperCAmelCase )
if priority_queue.is_empty():
return dist, parent
# initialization
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
__SCREAMING_SNAKE_CASE = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCAmelCase , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
# running prim's algorithm
while not priority_queue.is_empty():
__SCREAMING_SNAKE_CASE = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
__SCREAMING_SNAKE_CASE = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__UpperCAmelCase , dist[neighbour] )
__SCREAMING_SNAKE_CASE = node
return dist, parent
| 705 |
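The function at the end of the block returns (dist, parent) maps rather than an edge list. A small sketch, with invented example values, of how the MST edges and total weight fall out of those maps:

# Given the (dist, parent) maps the algorithm above returns, the MST edges
# are simply the (parent[v], v) pairs for every non-root vertex, and each
# dist[v] is the weight of the edge that connected v to the tree.
parent = {"a": None, "b": "a", "c": "a", "d": "c"}  # illustrative output
dist = {"a": 0, "b": 4, "c": 1, "d": 2}

mst_edges = [(p, v) for v, p in parent.items() if p is not None]
total_weight = sum(dist[v] for v, p in parent.items() if p is not None)
print(mst_edges, total_weight)  # [('a', 'b'), ('a', 'c'), ('c', 'd')] 7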
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a = {"configuration_sew": ["SEW_PRETRAINED_CONFIG_ARCHIVE_MAP", "SEWConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a = [
"SEW_PRETRAINED_MODEL_ARCHIVE_LIST",
"SEWForCTC",
"SEWForSequenceClassification",
"SEWModel",
"SEWPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
a = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 13 | 0 |
'''simple docstring'''
import logging
import os
from dataclasses import dataclass, field
from typing import Dict, Optional
import datasets
import numpy as np
import tensorflow as tf
from transformers import (
AutoConfig,
AutoTokenizer,
EvalPrediction,
HfArgumentParser,
PreTrainedTokenizer,
TFAutoModelForSequenceClassification,
TFTrainer,
TFTrainingArguments,
)
from transformers.utils import logging as hf_logging
hf_logging.set_verbosity_info()
hf_logging.enable_default_handler()
hf_logging.enable_explicit_format()
def __magic_name__ ( __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase , __UpperCAmelCase = None , ) -> int:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = {}
if train_file is not None:
__SCREAMING_SNAKE_CASE = [train_file]
if eval_file is not None:
__SCREAMING_SNAKE_CASE = [eval_file]
if test_file is not None:
__SCREAMING_SNAKE_CASE = [test_file]
__SCREAMING_SNAKE_CASE = datasets.load_dataset("""csv""" , data_files=__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = list(ds[list(files.keys() )[0]].features.keys() )
__SCREAMING_SNAKE_CASE = features_name.pop(__UpperCAmelCase )
__SCREAMING_SNAKE_CASE = list(set(ds[list(files.keys() )[0]][label_name] ) )
__SCREAMING_SNAKE_CASE = {label: i for i, label in enumerate(__UpperCAmelCase )}
__SCREAMING_SNAKE_CASE = tokenizer.model_input_names
__SCREAMING_SNAKE_CASE = {}
if len(__UpperCAmelCase ) == 1:
for k in files.keys():
__SCREAMING_SNAKE_CASE = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
example[features_name[0]] , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding="""max_length""" ) , batched=__UpperCAmelCase , )
elif len(__UpperCAmelCase ) == 2:
for k in files.keys():
__SCREAMING_SNAKE_CASE = ds[k].map(
lambda __UpperCAmelCase : tokenizer.batch_encode_plus(
(example[features_name[0]], example[features_name[1]]) , truncation=__UpperCAmelCase , max_length=__UpperCAmelCase , padding="""max_length""" , ) , batched=__UpperCAmelCase , )
def gen_train():
for ex in transformed_ds[datasets.Split.TRAIN]:
__SCREAMING_SNAKE_CASE = {k: v for k, v in ex.items() if k in input_names}
__SCREAMING_SNAKE_CASE = labelaid[ex[label_name]]
yield (d, label)
def gen_val():
for ex in transformed_ds[datasets.Split.VALIDATION]:
__SCREAMING_SNAKE_CASE = {k: v for k, v in ex.items() if k in input_names}
__SCREAMING_SNAKE_CASE = labelaid[ex[label_name]]
yield (d, label)
def gen_test():
for ex in transformed_ds[datasets.Split.TEST]:
__SCREAMING_SNAKE_CASE = {k: v for k, v in ex.items() if k in input_names}
__SCREAMING_SNAKE_CASE = labelaid[ex[label_name]]
yield (d, label)
__SCREAMING_SNAKE_CASE = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TRAIN in transformed_ds
else None
)
if train_ds is not None:
__SCREAMING_SNAKE_CASE = train_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TRAIN] ) ) )
__SCREAMING_SNAKE_CASE = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.VALIDATION in transformed_ds
else None
)
if val_ds is not None:
__SCREAMING_SNAKE_CASE = val_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.VALIDATION] ) ) )
__SCREAMING_SNAKE_CASE = (
tf.data.Dataset.from_generator(
__UpperCAmelCase , ({k: tf.intaa for k in input_names}, tf.intaa) , ({k: tf.TensorShape([None] ) for k in input_names}, tf.TensorShape([] )) , )
if datasets.Split.TEST in transformed_ds
else None
)
if test_ds is not None:
__SCREAMING_SNAKE_CASE = test_ds.apply(tf.data.experimental.assert_cardinality(len(ds[datasets.Split.TEST] ) ) )
return train_ds, val_ds, test_ds, labelaid
a = logging.getLogger(__name__)
@dataclass
class __a :
__UpperCamelCase : int = field(metadata={'help': 'Which column contains the label'} )
__UpperCamelCase : str = field(default=_snake_case, metadata={'help': 'The path of the training file'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'The path of the development file'} )
__UpperCamelCase : Optional[str] = field(default=_snake_case, metadata={'help': 'The path of the test file'} )
__UpperCamelCase : int = field(
default=128, metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
__UpperCamelCase : bool = field(
default=_snake_case, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
@dataclass
class __a :
__UpperCamelCase : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
__UpperCamelCase : bool = field(default=_snake_case, metadata={'help': 'Set this flag to use fast tokenization.'} )
# If you want to tweak more attributes on your tokenizer, you should do it in a distinct script,
# or just modify its tokenizer_config.json.
__UpperCamelCase : Optional[str] = field(
default=_snake_case, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
def __magic_name__ ( ) -> Dict:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = HfArgumentParser((ModelArguments, DataTrainingArguments, TFTrainingArguments) )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = parser.parse_args_into_dataclasses()
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
f"""Output directory ({training_args.output_dir}) already exists and is not empty. Use"""
""" --overwrite_output_dir to overcome.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.info(
f"""n_replicas: {training_args.n_replicas}, distributed training: {bool(training_args.n_replicas > 1 )}, """
f"""16-bits training: {training_args.fpaa}""" )
logger.info(f"""Training/evaluation parameters {training_args}""" )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
__SCREAMING_SNAKE_CASE = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = get_tfds(
train_file=data_args.train_file , eval_file=data_args.dev_file , test_file=data_args.test_file , tokenizer=__UpperCAmelCase , label_column_id=data_args.label_column_id , max_seq_length=data_args.max_seq_length , )
__SCREAMING_SNAKE_CASE = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , num_labels=len(__UpperCAmelCase ) , labelaid=__UpperCAmelCase , idalabel={id: label for label, id in labelaid.items()} , finetuning_task="""text-classification""" , cache_dir=model_args.cache_dir , )
with training_args.strategy.scope():
__SCREAMING_SNAKE_CASE = TFAutoModelForSequenceClassification.from_pretrained(
model_args.model_name_or_path , from_pt=bool(""".bin""" in model_args.model_name_or_path ) , config=__UpperCAmelCase , cache_dir=model_args.cache_dir , )
def compute_metrics(__UpperCAmelCase ) -> Dict:
__SCREAMING_SNAKE_CASE = np.argmax(p.predictions , axis=1 )
return {"acc": (preds == p.label_ids).mean()}
# Initialize our Trainer
__SCREAMING_SNAKE_CASE = TFTrainer(
model=__UpperCAmelCase , args=__UpperCAmelCase , train_dataset=__UpperCAmelCase , eval_dataset=__UpperCAmelCase , compute_metrics=__UpperCAmelCase , )
# Training
if training_args.do_train:
trainer.train()
trainer.save_model()
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
__SCREAMING_SNAKE_CASE = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
__SCREAMING_SNAKE_CASE = trainer.evaluate()
__SCREAMING_SNAKE_CASE = os.path.join(training_args.output_dir , """eval_results.txt""" )
with open(__UpperCAmelCase , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in result.items():
logger.info(f""" {key} = {value}""" )
writer.write(f"""{key} = {value}\n""" )
results.update(__UpperCAmelCase )
return results
if __name__ == "__main__":
main()
| 706 |
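The script above wraps each split in tf.data.Dataset.from_generator with explicit output types and shapes, then asserts the cardinality it already knows from the source dataset. A minimal runnable sketch of that construction; the token ids and sizes are made up for illustration.

import tensorflow as tf


def gen():
    # (features, label) pairs, mirroring the gen_train/gen_val closures above
    yield {"input_ids": [101, 2023, 102]}, 1
    yield {"input_ids": [101, 2003, 102]}, 0


ds = tf.data.Dataset.from_generator(
    gen,
    ({"input_ids": tf.int32}, tf.int64),
    ({"input_ids": tf.TensorShape([None])}, tf.TensorShape([])),
)
# from_generator cannot know the dataset size up front; assert it when it
# is known, as the script does after building each split.
ds = ds.apply(tf.data.experimental.assert_cardinality(2))
for features, label in ds:
    print(features["input_ids"].numpy(), int(label))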
'''simple docstring'''
import requests
from bsa import BeautifulSoup
def __magic_name__ ( __UpperCAmelCase = "AAPL" ) -> str:
'''simple docstring'''
__SCREAMING_SNAKE_CASE = f"""https://in.finance.yahoo.com/quote/{symbol}?s={symbol}"""
__SCREAMING_SNAKE_CASE = BeautifulSoup(requests.get(__UpperCAmelCase ).text , """html.parser""" )
__SCREAMING_SNAKE_CASE = """My(6px) Pos(r) smartphone_Mt(6px)"""
return soup.find("""div""" , class_=class_ ).find("""span""" ).text
if __name__ == "__main__":
for symbol in "AAPL AMZN IBM GOOG MSFT ORCL".split():
print(F'''Current {symbol:<4} stock price is {stock_price(symbol):>8}''')
| 13 | 0 |
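The scraper above is the classic requests plus BeautifulSoup select-by-class pattern (the real package name is bs4, and Yahoo's CSS class names change often, so that selector is brittle). The same pattern against a stable page, as a runnable sketch:

import requests
from bs4 import BeautifulSoup

html = requests.get("https://example.com", timeout=10).text
soup = BeautifulSoup(html, "html.parser")
# find() the first matching tag and read its text, as the scraper does
# with the div/span pair above.
print(soup.find("h1").text)  # "Example Domain"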