code
stringlengths 81
54k
| code_codestyle
int64 0
721
| style_context
stringlengths 91
41.9k
| style_context_codestyle
int64 0
699
| label
int64 0
1
|
---|---|---|---|---|
"""simple docstring"""
print((lambda quine: quine % quine)('''print((lambda quine: quine %% quine)(%r))'''))
| 703 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 22 | 0 |
"""simple docstring"""
import numpy
# List of input, output pairs
_lowercase = (
((5, 2, 3), 15),
((6, 5, 9), 25),
((11, 12, 13), 41),
((1, 1, 1), 8),
((11, 12, 13), 41),
)
_lowercase = (((5_15, 22, 13), 5_55), ((61, 35, 49), 1_50))
_lowercase = [2, 4, 1, 5]
_lowercase = len(train_data)
_lowercase = 0.009
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : str="train" ):
return calculate_hypothesis_value(snake_case__ , snake_case__ ) - output(
snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : Optional[int] ):
A = 0
for i in range(len(snake_case__ ) - 1 ):
hyp_val += data_input_tuple[i] * parameter_vector[i + 1]
hyp_val += parameter_vector[0]
return hyp_val
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : List[str] ):
if data_set == "train":
return train_data[example_no][1]
elif data_set == "test":
return test_data[example_no][1]
return None
def _snake_case ( snake_case__ : Any , snake_case__ : List[str] ):
if data_set == "train":
return _hypothesis_value(train_data[example_no][0] )
elif data_set == "test":
return _hypothesis_value(test_data[example_no][0] )
return None
def _snake_case ( snake_case__ : Tuple , snake_case__ : Any=m ):
A = 0
for i in range(snake_case__ ):
if index == -1:
summation_value += _error(snake_case__ )
else:
summation_value += _error(snake_case__ ) * train_data[i][0][index]
return summation_value
def _snake_case ( snake_case__ : Optional[int] ):
A = summation_of_cost_derivative(snake_case__ , snake_case__ ) / m
return cost_derivative_value
def _snake_case ( ):
global parameter_vector
# Tune these values to set a tolerance value for predicted output
A = 0.000002
A = 0
A = 0
while True:
j += 1
A = [0, 0, 0, 0]
for i in range(0 , len(snake_case__ ) ):
A = get_cost_derivative(i - 1 )
A = (
parameter_vector[i] - LEARNING_RATE * cost_derivative
)
if numpy.allclose(
snake_case__ , snake_case__ , atol=snake_case__ , rtol=snake_case__ , ):
break
A = temp_parameter_vector
print(('Number of iterations:', j) )
def _snake_case ( ):
for i in range(len(snake_case__ ) ):
print(('Actual output value:', output(snake_case__ , 'test' )) )
print(('Hypothesis output:', calculate_hypothesis_value(snake_case__ , 'test' )) )
if __name__ == "__main__":
run_gradient_descent()
print('''\nTesting gradient descent for a linear hypothesis function.\n''')
test_gradient_descent() | 704 |
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 | 0 |
"""simple docstring"""
import unittest
from transformers import BertGenerationConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import BertGenerationDecoder, BertGenerationEncoder
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Any ,A_ : int=13 ,A_ : Optional[int]=7 ,A_ : List[Any]=True ,A_ : str=True ,A_ : Optional[Any]=99 ,A_ : Optional[Any]=32 ,A_ : Optional[int]=5 ,A_ : str=4 ,A_ : int=37 ,A_ : Optional[Any]="gelu" ,A_ : int=0.1 ,A_ : Dict=0.1 ,A_ : Any=50 ,A_ : Tuple=0.02 ,A_ : Tuple=True ,A_ : Optional[Any]=None ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = initializer_range
A = use_labels
A = scope
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
if self.use_labels:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = self.get_config()
return config, input_ids, input_mask, token_labels
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return BertGenerationConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,is_decoder=A_ ,initializer_range=self.initializer_range ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
(
(
A
) , (
A
) , (
A
) , (
A
) ,
) = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
token_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ,A_ : Dict ,A_ : Any ,A_ : Union[str, Any] ,**A_ : int ,) -> List[Any]:
A = BertGenerationEncoder(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : str ,A_ : Union[str, Any] ,A_ : int ,A_ : Union[str, Any] ,A_ : Dict ,A_ : int ,**A_ : Union[str, Any] ,) -> int:
A = True
A = BertGenerationEncoder(config=A_ )
model.to(A_ )
model.eval()
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,)
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,)
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : Optional[int] ,A_ : Dict ,**A_ : Tuple ,) -> int:
A = True
A = True
A = BertGenerationDecoder(config=A_ ).to(A_ ).eval()
# first forward pass
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,use_cache=A_ ,)
A = outputs.past_key_values
# create hypothetical multiple next token and extent to next_input_ids
A = ids_tensor((self.batch_size, 3) ,config.vocab_size )
A = ids_tensor((self.batch_size, 3) ,vocab_size=2 )
# append to next input_ids and
A = torch.cat([input_ids, next_tokens] ,dim=-1 )
A = torch.cat([input_mask, next_mask] ,dim=-1 )
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,output_hidden_states=A_ ,)['hidden_states'][0]
A = model(
A_ ,attention_mask=A_ ,encoder_hidden_states=A_ ,encoder_attention_mask=A_ ,past_key_values=A_ ,output_hidden_states=A_ ,)['hidden_states'][0]
# select random slice
A = ids_tensor((1,) ,output_from_past.shape[-1] ).item()
A = output_from_no_past[:, -3:, random_slice_idx].detach()
A = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(A_ ,A_ ,atol=1e-3 ) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : int ,A_ : int ,A_ : List[str] ,*A_ : int ,) -> List[str]:
A = BertGenerationDecoder(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A , A , A , A = self.prepare_config_and_inputs()
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: str = (BertGenerationEncoder, BertGenerationDecoder) if is_torch_available() else ()
_lowerCamelCase: Optional[int] = (BertGenerationDecoder,) if is_torch_available() else ()
_lowerCamelCase: Union[str, Any] = (
{'''feature-extraction''': BertGenerationEncoder, '''text-generation''': BertGenerationDecoder}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = BertGenerationEncoderTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = 'bert'
self.model_tester.create_and_check_model(A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Dict:
# This regression test was failing with PyTorch < 1.3
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.model_tester.prepare_config_and_inputs_for_decoder()
A = None
self.model_tester.create_and_check_model_as_decoder(
A_ ,A_ ,A_ ,A_ ,A_ ,A_ ,)
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_for_causal_lm(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = BertGenerationEncoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
A = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A = model(A_ )[0]
A = torch.Size([1, 8, 1024] )
self.assertEqual(output.shape ,A_ )
A = torch.tensor(
[[[0.17_75, 0.00_83, -0.03_21], [1.60_02, 0.12_87, 0.39_12], [2.14_73, 0.57_91, 0.60_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,A_ ,atol=1e-4 ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = BertGenerationDecoder.from_pretrained('google/bert_for_seq_generation_L-24_bbc_encoder' )
A = torch.tensor([[101, 7592, 1010, 2026, 3899, 2003, 1_0140, 102]] )
with torch.no_grad():
A = model(A_ )[0]
A = torch.Size([1, 8, 5_0358] )
self.assertEqual(output.shape ,A_ )
A = torch.tensor(
[[[-0.57_88, -2.59_94, -3.70_54], [0.04_38, 4.79_97, 1.87_95], [1.58_62, 6.64_09, 4.46_38]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,A_ ,atol=1e-4 ) ) | 705 |
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
A = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
A = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
A = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
A = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A = os.path.abspath(snake_case__ )
A = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A = TransfoXLConfig()
else:
A = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TransfoXLLMHeadModel(snake_case__ )
A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
A = os.path.join(snake_case__ , snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''junnyu/roformer_chinese_small''': '''https://huggingface.co/junnyu/roformer_chinese_small/resolve/main/config.json''',
'''junnyu/roformer_chinese_base''': '''https://huggingface.co/junnyu/roformer_chinese_base/resolve/main/config.json''',
'''junnyu/roformer_chinese_char_small''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_small/resolve/main/config.json'''
),
'''junnyu/roformer_chinese_char_base''': (
'''https://huggingface.co/junnyu/roformer_chinese_char_base/resolve/main/config.json'''
),
'''junnyu/roformer_small_discriminator''': (
'''https://huggingface.co/junnyu/roformer_small_discriminator/resolve/main/config.json'''
),
'''junnyu/roformer_small_generator''': (
'''https://huggingface.co/junnyu/roformer_small_generator/resolve/main/config.json'''
),
# See all RoFormer models at https://huggingface.co/models?filter=roformer
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = '''roformer'''
def __init__( self : Optional[Any] ,A_ : str=5_0000 ,A_ : str=None ,A_ : Optional[Any]=768 ,A_ : Optional[int]=12 ,A_ : List[str]=12 ,A_ : int=3072 ,A_ : str="gelu" ,A_ : List[str]=0.1 ,A_ : Dict=0.1 ,A_ : Union[str, Any]=1536 ,A_ : Dict=2 ,A_ : str=0.02 ,A_ : Union[str, Any]=1e-12 ,A_ : List[str]=0 ,A_ : List[str]=False ,A_ : Optional[Any]=True ,**A_ : Tuple ,) -> str:
super().__init__(pad_token_id=A_ ,**A_ )
A = vocab_size
A = hidden_size if embedding_size is None else embedding_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = rotary_value
A = use_cache
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
if self.task == "multiple-choice":
A = {0: 'batch', 1: 'choice', 2: 'sequence'}
else:
A = {0: 'batch', 1: 'sequence'}
A = {0: 'batch', 1: 'sequence'}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
('token_type_ids', dynamic_axis),
] )
| 706 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
# check if the u exists
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
# if u does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
return end - begin | 22 | 0 |
"""simple docstring"""
import torch
from diffusers import DDPMParallelScheduler
from .test_schedulers import SchedulerCommonTest
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = (DDPMParallelScheduler,)
def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : int ) -> Dict:
A = {
'num_train_timesteps': 1000,
'beta_start': 0.00_01,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**A_ )
return config
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
for beta_start, beta_end in zip([0.00_01, 0.0_01, 0.01, 0.1] ,[0.0_02, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=A_ ,beta_end=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
self.check_over_configs(thresholding=A_ )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=A_ ,prediction_type=A_ ,sample_max_value=A_ ,)
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A_ )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_09_79 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1e-5
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A_ )
A = len(A_ )
A = self.dummy_model()
A = self.dummy_sample_deter
A = self.dummy_sample_deter + 0.1
A = self.dummy_sample_deter - 0.1
A = samplea.shape[0]
A = torch.stack([samplea, samplea, samplea] ,dim=0 )
A = torch.arange(A_ )[0:3, None].repeat(1 ,A_ )
A = model(samples.flatten(0 ,1 ) ,timesteps.flatten(0 ,1 ) )
A = scheduler.batch_step_no_noise(A_ ,timesteps.flatten(0 ,1 ) ,samples.flatten(0 ,1 ) )
A = torch.sum(torch.abs(A_ ) )
A = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 1153.1833 ) < 1e-2
assert abs(result_mean.item() - 0.50_05 ) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A_ )
A = len(A_ )
A = self.dummy_model()
A = self.dummy_sample_deter
A = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
A = model(A_ ,A_ )
# 2. predict previous mean of sample x_t-1
A = scheduler.step(A_ ,A_ ,A_ ,generator=A_ ).prev_sample
A = pred_prev_sample
A = torch.sum(torch.abs(A_ ) )
A = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 258.9606 ) < 1e-2
assert abs(result_mean.item() - 0.33_72 ) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.scheduler_classes[0]
A = self.get_scheduler_config(prediction_type='v_prediction' )
A = scheduler_class(**A_ )
A = len(A_ )
A = self.dummy_model()
A = self.dummy_sample_deter
A = torch.manual_seed(0 )
for t in reversed(range(A_ ) ):
# 1. predict noise residual
A = model(A_ ,A_ )
# 2. predict previous mean of sample x_t-1
A = scheduler.step(A_ ,A_ ,A_ ,generator=A_ ).prev_sample
A = pred_prev_sample
A = torch.sum(torch.abs(A_ ) )
A = torch.mean(torch.abs(A_ ) )
assert abs(result_sum.item() - 202.0296 ) < 1e-2
assert abs(result_mean.item() - 0.26_31 ) < 1e-3
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A_ )
A = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=A_ )
A = scheduler.timesteps
for i, timestep in enumerate(A_ ):
if i == len(A_ ) - 1:
A = -1
else:
A = timesteps[i + 1]
A = scheduler.previous_timestep(A_ )
A = prev_t.item()
self.assertEqual(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Union[str, Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A_ )
A = [100, 87, 50, 51, 0]
with self.assertRaises(A_ ,msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[Any]:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A_ )
A = [100, 87, 50, 1, 0]
A = len(A_ )
with self.assertRaises(A_ ,msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=A_ ,timesteps=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
A = self.scheduler_classes[0]
A = self.get_scheduler_config()
A = scheduler_class(**A_ )
A = [scheduler.config.num_train_timesteps]
with self.assertRaises(
A_ ,msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' ,):
scheduler.set_timesteps(timesteps=A_ ) | 707 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
A = F'{olid} is not a valid Open Library olid'
raise ValueError(snake_case__ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 0 |
"""simple docstring"""
_lowercase = '''0.21.0'''
from .accelerator import Accelerator
from .big_modeling import (
cpu_offload,
cpu_offload_with_hook,
disk_offload,
dispatch_model,
init_empty_weights,
init_on_device,
load_checkpoint_and_dispatch,
)
from .data_loader import skip_first_batches
from .launchers import debug_launcher, notebook_launcher
from .state import PartialState
from .utils import (
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
FullyShardedDataParallelPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
find_executable_batch_size,
infer_auto_device_map,
is_rich_available,
load_checkpoint_in_model,
synchronize_rng_states,
)
if is_rich_available():
from .utils import rich | 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 0 |
import math
def _snake_case ( snake_case__ : int = 100 ):
A = sum(i * i for i in range(1 , n + 1 ) )
A = int(math.pow(sum(range(1 , n + 1 ) ) , 2 ) )
return square_of_sum - sum_of_squares
if __name__ == "__main__":
print(F"""{solution() = }""") | 709 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
A = swinva_name.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def _snake_case ( snake_case__ : List[Any] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[
dim : dim * 2
]
A = val[-dim:]
else:
A = val
return orig_state_dict
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
A = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A = get_swinva_config(snake_case__ )
A = SwinvaForImageClassification(snake_case__ )
model.eval()
A = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A = image_processor(images=snake_case__ , return_tensors='pt' )
A = timm_model(inputs['pixel_values'] )
A = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path) | 22 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
A = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 710 |
"""simple docstring"""
from math import pi, sqrt
def _snake_case ( snake_case__ : float ):
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
elif num - int(snake_case__ ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
return sqrt(snake_case__ )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _snake_case ( ):
assert gamma(0.5 ) == sqrt(snake_case__ )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 | 0 |
"""simple docstring"""
from abc import ABC, abstractmethod
from argparse import ArgumentParser
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
@abstractmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Optional[int]:
raise NotImplementedError()
@abstractmethod
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
raise NotImplementedError() | 711 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean | 22 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 712 |
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
A = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 0 |
import argparse
import collections
import os
import re
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_table.py
_lowercase = '''src/transformers'''
_lowercase = '''docs/source/en'''
_lowercase = '''.'''
def _snake_case ( snake_case__ : str , snake_case__ : List[Any] , snake_case__ : Dict ):
with open(snake_case__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.readlines()
# Find the start prompt.
A = 0
while not lines[start_index].startswith(snake_case__ ):
start_index += 1
start_index += 1
A = start_index
while not lines[end_index].startswith(snake_case__ ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# Add here suffixes that are used to identify models, separated by |
_lowercase = '''Model|Encoder|Decoder|ForConditionalGeneration'''
# Regexes that match TF/Flax/PT model names.
_lowercase = re.compile(r'''TF(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
_lowercase = re.compile(r'''Flax(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# Will match any TF or Flax model too so need to be in an else branch afterthe two previous regexes.
_lowercase = re.compile(r'''(.*)(?:Model|Encoder|Decoder|ForConditionalGeneration)''')
# This is to make sure the transformers module imported is the one in the repo.
_lowercase = direct_transformers_import(TRANSFORMERS_PATH)
def _snake_case ( snake_case__ : Any ):
A = re.finditer('.+?(?:(?<=[a-z])(?=[A-Z])|(?<=[A-Z])(?=[A-Z][a-z])|$)' , snake_case__ )
return [m.group(0 ) for m in matches]
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] ):
A = 2 if text == '✅' or text == '❌' else len(snake_case__ )
A = (width - text_length) // 2
A = width - text_length - left_indent
return " " * left_indent + text + " " * right_indent
def _snake_case ( ):
A = transformers_module.models.auto.configuration_auto.CONFIG_MAPPING_NAMES
A = {
name: config_maping_names[code]
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if code in config_maping_names
}
A = {name: config.replace('Config' , '' ) for name, config in model_name_to_config.items()}
# Dictionaries flagging if each model prefix has a slow/fast tokenizer, backend in PT/TF/Flax.
A = collections.defaultdict(snake_case__ )
A = collections.defaultdict(snake_case__ )
A = collections.defaultdict(snake_case__ )
A = collections.defaultdict(snake_case__ )
A = collections.defaultdict(snake_case__ )
# Let's lookup through all transformers object (once).
for attr_name in dir(snake_case__ ):
A = None
if attr_name.endswith('Tokenizer' ):
A = slow_tokenizers
A = attr_name[:-9]
elif attr_name.endswith('TokenizerFast' ):
A = fast_tokenizers
A = attr_name[:-13]
elif _re_tf_models.match(snake_case__ ) is not None:
A = tf_models
A = _re_tf_models.match(snake_case__ ).groups()[0]
elif _re_flax_models.match(snake_case__ ) is not None:
A = flax_models
A = _re_flax_models.match(snake_case__ ).groups()[0]
elif _re_pt_models.match(snake_case__ ) is not None:
A = pt_models
A = _re_pt_models.match(snake_case__ ).groups()[0]
if lookup_dict is not None:
while len(snake_case__ ) > 0:
if attr_name in model_name_to_prefix.values():
A = True
break
# Try again after removing the last word in the name
A = ''.join(camel_case_split(snake_case__ )[:-1] )
# Let's build that table!
A = list(model_name_to_config.keys() )
model_names.sort(key=str.lower )
A = ['Model', 'Tokenizer slow', 'Tokenizer fast', 'PyTorch support', 'TensorFlow support', 'Flax Support']
# We'll need widths to properly display everything in the center (+2 is to leave one extra space on each side).
A = [len(snake_case__ ) + 2 for c in columns]
A = max([len(snake_case__ ) for name in model_names] ) + 2
# Build the table per se
A = '|' + '|'.join([_center_text(snake_case__ , snake_case__ ) for c, w in zip(snake_case__ , snake_case__ )] ) + '|\n'
# Use ":-----:" format to center-aligned table cell texts
table += "|" + "|".join([':' + '-' * (w - 2) + ':' for w in widths] ) + "|\n"
A = {True: '✅', False: '❌'}
for name in model_names:
A = model_name_to_prefix[name]
A = [
name,
check[slow_tokenizers[prefix]],
check[fast_tokenizers[prefix]],
check[pt_models[prefix]],
check[tf_models[prefix]],
check[flax_models[prefix]],
]
table += "|" + "|".join([_center_text(snake_case__ , snake_case__ ) for l, w in zip(snake_case__ , snake_case__ )] ) + "|\n"
return table
def _snake_case ( snake_case__ : Dict=False ):
A , A , A , A = _find_text_in_file(
filename=os.path.join(snake_case__ , 'index.md' ) , start_prompt='<!--This table is updated automatically from the auto modules' , end_prompt='<!-- End table-->' , )
A = get_model_table_from_auto_modules()
if current_table != new_table:
if overwrite:
with open(os.path.join(snake_case__ , 'index.md' ) , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lines[:start_index] + [new_table] + lines[end_index:] )
else:
raise ValueError(
'The model table in the `index.md` has not been updated. Run `make fix-copies` to fix this.' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--fix_and_overwrite''', action='''store_true''', help='''Whether to fix inconsistencies.''')
_lowercase = parser.parse_args()
check_model_table(args.fix_and_overwrite) | 713 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {
'''configuration_mobilebert''': [
'''MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''MobileBertConfig''',
'''MobileBertOnnxConfig''',
],
'''tokenization_mobilebert''': ['''MobileBertTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''MobileBertTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MobileBertForMaskedLM''',
'''MobileBertForMultipleChoice''',
'''MobileBertForNextSentencePrediction''',
'''MobileBertForPreTraining''',
'''MobileBertForQuestionAnswering''',
'''MobileBertForSequenceClassification''',
'''MobileBertForTokenClassification''',
'''MobileBertLayer''',
'''MobileBertModel''',
'''MobileBertPreTrainedModel''',
'''load_tf_weights_in_mobilebert''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFMobileBertForMaskedLM''',
'''TFMobileBertForMultipleChoice''',
'''TFMobileBertForNextSentencePrediction''',
'''TFMobileBertForPreTraining''',
'''TFMobileBertForQuestionAnswering''',
'''TFMobileBertForSequenceClassification''',
'''TFMobileBertForTokenClassification''',
'''TFMobileBertMainLayer''',
'''TFMobileBertModel''',
'''TFMobileBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mobilebert import (
MOBILEBERT_PRETRAINED_CONFIG_ARCHIVE_MAP,
MobileBertConfig,
MobileBertOnnxConfig,
)
from .tokenization_mobilebert import MobileBertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mobilebert_fast import MobileBertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilebert import (
MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertLayer,
MobileBertModel,
MobileBertPreTrainedModel,
load_tf_weights_in_mobilebert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilebert import (
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileBertForMaskedLM,
TFMobileBertForMultipleChoice,
TFMobileBertForNextSentencePrediction,
TFMobileBertForPreTraining,
TFMobileBertForQuestionAnswering,
TFMobileBertForSequenceClassification,
TFMobileBertForTokenClassification,
TFMobileBertMainLayer,
TFMobileBertModel,
TFMobileBertPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 714 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : Tuple , snake_case__ : List[Any] ):
A = 0
A = len(snake_case__ ) - 1
while left <= right:
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(snake_case__ ):
return None
A = sorted_collection[point]
if current_item == item:
return point
else:
if point < left:
A = left
A = point
elif point > right:
A = right
A = point
else:
if item < current_item:
A = point - 1
else:
A = point + 1
return None
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : int , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ):
# avoid divided by 0 during interpolation
if sorted_collection[left] == sorted_collection[right]:
if sorted_collection[left] == item:
return left
else:
return None
A = left + ((item - sorted_collection[left]) * (right - left)) // (
sorted_collection[right] - sorted_collection[left]
)
# out of range check
if point < 0 or point >= len(snake_case__ ):
return None
if sorted_collection[point] == item:
return point
elif point < left:
return interpolation_search_by_recursion(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
elif point > right:
return interpolation_search_by_recursion(snake_case__ , snake_case__ , snake_case__ , snake_case__ )
else:
if sorted_collection[point] > item:
return interpolation_search_by_recursion(
snake_case__ , snake_case__ , snake_case__ , point - 1 )
else:
return interpolation_search_by_recursion(
snake_case__ , snake_case__ , point + 1 , snake_case__ )
def _snake_case ( snake_case__ : Dict ):
if collection != sorted(snake_case__ ):
raise ValueError('Collection must be ascending sorted' )
return True
if __name__ == "__main__":
import sys
_lowercase = 0
if debug == 1:
_lowercase = [10, 30, 40, 45, 50, 66, 77, 93]
try:
__assert_sorted(collection)
except ValueError:
sys.exit('''Sequence must be ascending sorted to apply interpolation search''')
_lowercase = 67
_lowercase = interpolation_search(collection, target)
if result is not None:
print(F"""{target} found at positions: {result}""")
else:
print('''Not found''') | 715 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
_lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
_lowercase = '''|'''.join(sys.argv[1:])
_lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""")
_lowercase = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''') | 22 | 0 |
"""simple docstring"""
import numpy as np
def _snake_case ( snake_case__ : np.ndarray , snake_case__ : np.ndarray , snake_case__ : float = 1e-12 , snake_case__ : int = 100 , ):
assert np.shape(snake_case__ )[0] == np.shape(snake_case__ )[1]
# Ensure proper dimensionality.
assert np.shape(snake_case__ )[0] == np.shape(snake_case__ )[0]
# Ensure inputs are either both complex or both real
assert np.iscomplexobj(snake_case__ ) == np.iscomplexobj(snake_case__ )
A = np.iscomplexobj(snake_case__ )
if is_complex:
# Ensure complex input_matrix is Hermitian
assert np.array_equal(snake_case__ , input_matrix.conj().T )
# Set convergence to False. Will define convergence when we exceed max_iterations
# or when we have small changes from one iteration to next.
A = False
A = 0
A = 0
A = 1e12
while not convergence:
# Multiple matrix by the vector.
A = np.dot(snake_case__ , snake_case__ )
# Normalize the resulting output vector.
A = w / np.linalg.norm(snake_case__ )
# Find rayleigh quotient
# (faster than usual b/c we know vector is normalized already)
A = vector.conj().T if is_complex else vector.T
A = np.dot(snake_case__ , np.dot(snake_case__ , snake_case__ ) )
# Check convergence.
A = np.abs(lambda_ - lambda_previous ) / lambda_
iterations += 1
if error <= error_tol or iterations >= max_iterations:
A = True
A = lambda_
if is_complex:
A = np.real(lambda_ )
return lambda_, vector
def _snake_case ( ):
A = np.array([[41, 4, 20], [4, 26, 30], [20, 30, 50]] )
A = np.array([41, 4, 20] )
A = real_input_matrix.astype(np.complexaaa )
A = np.triu(1j * complex_input_matrix , 1 )
complex_input_matrix += imag_matrix
complex_input_matrix += -1 * imag_matrix.T
A = np.array([41, 4, 20] ).astype(np.complexaaa )
for problem_type in ["real", "complex"]:
if problem_type == "real":
A = real_input_matrix
A = real_vector
elif problem_type == "complex":
A = complex_input_matrix
A = complex_vector
# Our implementation.
A , A = power_iteration(snake_case__ , snake_case__ )
# Numpy implementation.
# Get eigenvalues and eigenvectors using built-in numpy
# eigh (eigh used for symmetric or hermetian matrices).
A , A = np.linalg.eigh(snake_case__ )
# Last eigenvalue is the maximum one.
A = eigen_values[-1]
# Last column in this matrix is eigenvector corresponding to largest eigenvalue.
A = eigen_vectors[:, -1]
# Check our implementation and numpy gives close answers.
assert np.abs(eigen_value - eigen_value_max ) <= 1e-6
# Take absolute values element wise of each eigenvector.
# as they are only unique to a minus sign.
assert np.linalg.norm(np.abs(snake_case__ ) - np.abs(snake_case__ ) ) <= 1e-6
if __name__ == "__main__":
import doctest
doctest.testmod()
test_power_iteration() | 716 |
"""simple docstring"""
import sys
from collections import defaultdict
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> int:
A = []
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]:
return self.node_position[vertex]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]:
A = pos
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A = 2 * start + 1
else:
A = 2 * start + 2
if heap[smallest_child] < heap[start]:
A , A = heap[smallest_child], positions[smallest_child]
A , A = (
heap[start],
positions[start],
)
A , A = temp, tempa
A = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,A_ )
self.top_to_bottom(A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict:
A = position[index]
while index != 0:
A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A = heap[parent]
A = position[parent]
self.set_position(position[parent] ,A_ )
else:
A = val
A = temp
self.set_position(A_ ,A_ )
break
A = parent
else:
A = val
A = temp
self.set_position(A_ ,0 )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]:
A = len(A_ ) // 2 - 1
for i in range(A_ ,-1 ,-1 ):
self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]:
A = positions[0]
A = sys.maxsize
self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ )
return temp
def _snake_case ( snake_case__ : Dict ):
A = Heap()
A = [0] * len(snake_case__ )
A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A = [] # Heap of Distance of vertices from their neighboring vertex
A = []
for vertex in range(len(snake_case__ ) ):
distance_tv.append(sys.maxsize )
positions.append(snake_case__ )
heap.node_position.append(snake_case__ )
A = []
A = 1
A = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A = 0
A = distance
heap.heapify(snake_case__ , snake_case__ )
for _ in range(1 , len(snake_case__ ) ):
A = heap.delete_minimum(snake_case__ , snake_case__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(snake_case__ )]
):
A = distance
heap.bottom_to_top(
snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ )
A = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase = int(input('''Enter number of edges: ''').strip())
_lowercase = defaultdict(list)
for _ in range(edges_number):
_lowercase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 22 | 0 |
"""simple docstring"""
import math
from typing import Dict, Iterable, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import normalize, rescale, resize, to_channel_dimension_format
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
get_image_size,
is_torch_available,
is_torch_tensor,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_torch_available():
import torch
if is_vision_available():
import PIL
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : np.ndarray , snake_case__ : Union[int, Iterable[int]] , snake_case__ : bool , snake_case__ : int ):
def constraint_to_multiple_of(snake_case__ : Dict , snake_case__ : Optional[Any] , snake_case__ : Union[str, Any]=0 , snake_case__ : List[Any]=None ):
A = round(val / multiple ) * multiple
if max_val is not None and x > max_val:
A = math.floor(val / multiple ) * multiple
if x < min_val:
A = math.ceil(val / multiple ) * multiple
return x
A = (output_size, output_size) if isinstance(snake_case__ , snake_case__ ) else output_size
A , A = get_image_size(snake_case__ )
A , A = output_size
# determine new height and width
A = output_height / input_height
A = output_width / input_width
if keep_aspect_ratio:
# scale as little as possible
if abs(1 - scale_width ) < abs(1 - scale_height ):
# fit width
A = scale_width
else:
# fit height
A = scale_height
A = constraint_to_multiple_of(scale_height * input_height , multiple=snake_case__ )
A = constraint_to_multiple_of(scale_width * input_width , multiple=snake_case__ )
return (new_height, new_width)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = False ,A_ : int = 1 ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Any ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'height': 384, 'width': 384}
A = get_size_dict(A_ )
A = do_resize
A = size
A = keep_aspect_ratio
A = ensure_multiple_of
A = resample
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : bool = False ,A_ : int = 1 ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The size dictionary must contain the keys \'height\' and \'width\'. Got {size.keys()}' )
A = get_resize_output_image_size(
A_ ,output_size=(size['height'], size['width']) ,keep_aspect_ratio=A_ ,multiple=A_ ,)
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : np.ndarray ,A_ : Union[int, float] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ,) -> Tuple:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Tuple ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : ImageInput ,A_ : bool = None ,A_ : int = None ,A_ : bool = None ,A_ : int = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : float = None ,A_ : bool = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : ChannelDimension = ChannelDimension.FIRST ,**A_ : Optional[Any] ,) -> PIL.Image.Image:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ )
A = keep_aspect_ratio if keep_aspect_ratio is not None else self.keep_aspect_ratio
A = ensure_multiple_of if ensure_multiple_of is not None else self.ensure_multiple_of
A = resample if resample is not None else self.resample
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : List[Tuple] = None ) -> Tuple:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation
| 717 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase = True
except ImportError:
_lowercase = False
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( snake_case__ : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ : str ,A_ : str ,A_ : List[str] ):
# Create temp file
A , A = mkstemp()
A = False
with fdopen(A_ ,'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ ,A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ ,A_ )
def skip_units(A_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ : Tuple ):
with open(A_ ) as datafile:
A = []
A = False
A = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ ,A_ ,A_ )
A = []
elif "# Replace with" in line and "##" not in line:
A = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ ) | 22 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_lowercase = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''MLukeTokenizer''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 718 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str:
A = size if size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ImageGPTImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'clusters' ) )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
A = self.image_processing_class(**self.image_processor_dict )
A = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,obj[key] ) )
else:
self.assertEqual(obj[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(A_ ,'image_processor.json' )
image_processor_first.to_json_file(A_ )
A = self.image_processing_class.from_json_file(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(A_ )
A = self.image_processing_class.from_pretrained(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
pass
def _snake_case ( ):
A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
A = Image.open(dataset[4]['file'] )
A = Image.open(dataset[5]['file'] )
A = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
A = prepare_images()
# test non-batched
A = image_processing(images[0] ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
A = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ )
# test batched
A = image_processing(A_ ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
A = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ ) | 22 | 0 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
_lowercase = logging.get_logger(__name__)
_lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset)
def _snake_case ( snake_case__ : List[DatasetType] , snake_case__ : Optional[List[float]] = None , snake_case__ : Optional[int] = None , snake_case__ : Optional[DatasetInfo] = None , snake_case__ : Optional[NamedSplit] = None , snake_case__ : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(snake_case__ ):
if not isinstance(snake_case__ , (Dataset, IterableDataset) ):
if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(snake_case__ )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(snake_case__ ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}.' )
if i == 0:
A , A = (
(Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(F'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ )
else:
return _interleave_iterable_datasets(
snake_case__ , snake_case__ , snake_case__ , info=snake_case__ , split=snake_case__ , stopping_strategy=snake_case__ )
def _snake_case ( snake_case__ : List[DatasetType] , snake_case__ : Optional[DatasetInfo] = None , snake_case__ : Optional[NamedSplit] = None , snake_case__ : int = 0 , ):
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(snake_case__ ):
if not isinstance(snake_case__ , (Dataset, IterableDataset) ):
if isinstance(snake_case__ , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
F'Dataset at position {i} has at least one split: {list(snake_case__ )}\n'
F'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(snake_case__ ) )}\']' )
raise ValueError(
F'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(snake_case__ ).__name__}.' )
if i == 0:
A , A = (
(Dataset, IterableDataset) if isinstance(snake_case__ , snake_case__ ) else (IterableDataset, Dataset)
)
elif not isinstance(snake_case__ , snake_case__ ):
raise ValueError(
F'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ )
else:
return _concatenate_iterable_datasets(snake_case__ , info=snake_case__ , split=snake_case__ , axis=snake_case__ ) | 719 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _snake_case ( snake_case__ : Optional[int] ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' )
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' )
download_parser.set_defaults(func=A_ )
def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]:
A = model
A = cache
A = force
A = trust_remote_code
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) | 22 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : list[int] , snake_case__ : str ):
A = int(snake_case__ )
# Initialize Result
A = []
# Traverse through all denomination
for denomination in reversed(snake_case__ ):
# Find denominations
while int(snake_case__ ) >= int(snake_case__ ):
total_value -= int(snake_case__ )
answer.append(snake_case__ ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
_lowercase = []
_lowercase = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
_lowercase = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"""Denomination {i}: """).strip()))
_lowercase = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
_lowercase = [1, 2, 5, 10, 20, 50, 1_00, 5_00, 20_00]
_lowercase = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"""Following is minimal change for {value}: """)
_lowercase = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''') | 720 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spm_char.model'''}
_lowercase = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
_lowercase = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Any:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]:
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
A = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
A = [1]
if token_ids_a is None:
return ([0] * len(A_ )) + suffix_ones
return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,) | 22 | 0 |
"""simple docstring"""
def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : int , snake_case__ : int , snake_case__ : set ):
A , A = len(snake_case__ ), len(grid[0] )
if (
min(snake_case__ , snake_case__ ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
A = 0
count += depth_first_search(snake_case__ , row + 1 , snake_case__ , snake_case__ )
count += depth_first_search(snake_case__ , row - 1 , snake_case__ , snake_case__ )
count += depth_first_search(snake_case__ , snake_case__ , col + 1 , snake_case__ )
count += depth_first_search(snake_case__ , snake_case__ , col - 1 , snake_case__ )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod() | 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPFeatureExtractor''']
_lowercase = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 0 |
import requests
SCREAMING_SNAKE_CASE = "YOUR API KEY"
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = giphy_api_key ) -> list:
UpperCAmelCase_ = "+".join(query.split() )
UpperCAmelCase_ = f'''https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}'''
UpperCAmelCase_ = requests.get(__SCREAMING_SNAKE_CASE ).json()["data"]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
print("\n".join(get_gifs("space ship")))
| 23 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 23 | 1 |
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionSAGPipeline,
UNetaDConditionModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Any = StableDiffusionSAGPipeline
lowerCAmelCase_ : Union[str, Any] = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Any = TEXT_TO_IMAGE_BATCH_PARAMS
lowerCAmelCase_ : Union[str, Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ : str = TEXT_TO_IMAGE_IMAGE_PARAMS
lowerCAmelCase_ : List[Any] = False
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=32 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
UpperCAmelCase_ = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = {
"prompt": ".",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 1.0,
"sag_scale": 1.0,
"output_type": "numpy",
}
return inputs
def A__ ( self ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
UpperCAmelCase_ = StableDiffusionSAGPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
UpperCAmelCase_ = sag_pipe.to(lowerCAmelCase )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = "."
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sag_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.1568, 0.1738, 0.1695, 0.1693, 0.1507, 0.1705, 0.1547, 0.1751, 0.1949] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
UpperCAmelCase_ = sag_pipe.to(lowerCAmelCase )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = "."
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sag_pipe(
[prompt] , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" )
UpperCAmelCase_ = output.images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
UpperCAmelCase_ = np.array([0.3459, 0.2876, 0.2537, 0.3002, 0.2671, 0.2160, 0.3026, 0.2262, 0.2371] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = StableDiffusionSAGPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
UpperCAmelCase_ = sag_pipe.to(lowerCAmelCase )
sag_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = "."
UpperCAmelCase_ = torch.manual_seed(0 )
UpperCAmelCase_ = sag_pipe(
[prompt] , width=768 , height=512 , generator=lowerCAmelCase , guidance_scale=7.5 , sag_scale=1.0 , num_inference_steps=20 , output_type="np" , )
UpperCAmelCase_ = output.images
assert image.shape == (1, 512, 768, 3)
| 23 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = str(id_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = []
UpperCAmelCase_ = {} # {vertex:distance}
def __lt__( self , lowerCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def A__ ( self , lowerCAmelCase ):
self.neighbors.append(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = weight
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list:
UpperCAmelCase_ = []
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = graph[:]
while q:
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE )
q.remove(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Iterator[tuple]:
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
hq.heapify(__SCREAMING_SNAKE_CASE )
while h:
UpperCAmelCase_ = hq.heappop(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
hq.heapify(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def snake_case__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 1 |
from __future__ import annotations
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
if not nums:
return 0
UpperCAmelCase_ = nums[0]
UpperCAmelCase_ = 0
for num in nums[1:]:
UpperCAmelCase_ , UpperCAmelCase_ = (
max_excluding + num,
max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ),
)
return max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
import re
from pathlib import Path
from unittest import TestCase
import pytest
@pytest.mark.integration
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self , lowerCAmelCase ):
with open(lowerCAmelCase , encoding="utf-8" ) as input_file:
UpperCAmelCase_ = re.compile(r"(?!.*\b(?:encoding|rb|w|wb|w+|wb+|ab|ab+)\b)(?<=\s)(open)\((.*)\)" )
UpperCAmelCase_ = input_file.read()
UpperCAmelCase_ = regexp.search(lowerCAmelCase )
return match
def A__ ( self , lowerCAmelCase ):
with open(lowerCAmelCase , encoding="utf-8" ) as input_file:
UpperCAmelCase_ = re.compile(r"#[^\r\n]*print\(|\"[^\r\n]*print\(|\"\"\".*?print\(.*?\"\"\"|(print\()" , re.DOTALL )
UpperCAmelCase_ = input_file.read()
# use `re.finditer` to handle the case where the ignored groups would be matched first by `re.search`
UpperCAmelCase_ = regexp.finditer(lowerCAmelCase )
UpperCAmelCase_ = [match for match in matches if match is not None and match.group(1 ) is not None]
return matches[0] if matches else None
def A__ ( self ):
UpperCAmelCase_ = Path("./datasets" )
UpperCAmelCase_ = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_encoding_on_file_open(str(lowerCAmelCase ) ):
raise AssertionError(f'''open(...) must use utf-8 encoding in {dataset}''' )
def A__ ( self ):
UpperCAmelCase_ = Path("./datasets" )
UpperCAmelCase_ = list(dataset_paths.absolute().glob("**/*.py" ) )
for dataset in dataset_files:
if self._no_print_statements(str(lowerCAmelCase ) ):
raise AssertionError(f'''print statement found in {dataset}. Use datasets.logger/logging instead.''' )
| 23 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : List[str] = RetriBertTokenizer
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 1 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 23 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : int = ['input_ids', 'attention_mask']
lowerCAmelCase_ : str = DistilBertTokenizer
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"microsoft/trocr-base-handwritten": (
"https://huggingface.co/microsoft/trocr-base-handwritten/resolve/main/config.json"
),
# See all TrOCR models at https://huggingface.co/models?filter=trocr
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = 'trocr'
lowerCAmelCase_ : Union[str, Any] = ['past_key_values']
lowerCAmelCase_ : List[Any] = {
'num_attention_heads': 'decoder_attention_heads',
'hidden_size': 'd_model',
'num_hidden_layers': 'decoder_layers',
}
def __init__( self , lowerCAmelCase=5_0265 , lowerCAmelCase=1024 , lowerCAmelCase=12 , lowerCAmelCase=16 , lowerCAmelCase=4096 , lowerCAmelCase="gelu" , lowerCAmelCase=512 , lowerCAmelCase=0.1 , lowerCAmelCase=0.0 , lowerCAmelCase=0.0 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=0.0 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , **lowerCAmelCase , ):
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = d_model
UpperCAmelCase_ = decoder_layers
UpperCAmelCase_ = decoder_attention_heads
UpperCAmelCase_ = decoder_ffn_dim
UpperCAmelCase_ = activation_function
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = activation_dropout
UpperCAmelCase_ = init_std
UpperCAmelCase_ = decoder_layerdrop
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = scale_embedding
UpperCAmelCase_ = use_learned_position_embeddings
UpperCAmelCase_ = layernorm_embedding
super().__init__(
pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , decoder_start_token_id=lowerCAmelCase , **lowerCAmelCase , )
| 23 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.array:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
UpperCAmelCase_ = "f32le"
UpperCAmelCase_ = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase_ = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
UpperCAmelCase_ = output_stream[0]
UpperCAmelCase_ = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "f32le" , ) -> Dict:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
if format_for_conversion == "s16le":
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCAmelCase_ = platform.system()
if system == "Linux":
UpperCAmelCase_ = "alsa"
UpperCAmelCase_ = "default"
elif system == "Darwin":
UpperCAmelCase_ = "avfoundation"
UpperCAmelCase_ = ":0"
elif system == "Windows":
UpperCAmelCase_ = "dshow"
UpperCAmelCase_ = "default"
UpperCAmelCase_ = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase_ = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "f32le" , ) -> int:
if stream_chunk_s is not None:
UpperCAmelCase_ = stream_chunk_s
else:
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
UpperCAmelCase_ = np.intaa
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = np.floataa
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCAmelCase_ = chunk_length_s / 6
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
UpperCAmelCase_ = [stride_length_s, stride_length_s]
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase_ = datetime.datetime.now()
UpperCAmelCase_ = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
UpperCAmelCase_ = np.frombuffer(item["raw"] , dtype=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
UpperCAmelCase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Dict:
UpperCAmelCase_ = B""
UpperCAmelCase_ , UpperCAmelCase_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCAmelCase_ = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
UpperCAmelCase_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
UpperCAmelCase_ = (_stride_left, stride_right)
UpperCAmelCase_ = {"raw": acc[:chunk_len], "stride": stride}
if stream:
UpperCAmelCase_ = False
yield item
UpperCAmelCase_ = stride_left
UpperCAmelCase_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
UpperCAmelCase_ = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
UpperCAmelCase_ = False
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ = 2**24 # 16Mo
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
UpperCAmelCase_ = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 23 | 1 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : torch.FloatTensor
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ):
UpperCAmelCase_ = {}
def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return processors
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
self.set_attn_processor(AttnProcessor() )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 23 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 768 , ):
super().__init__()
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.ones(1 , lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase = None , lowerCAmelCase = None , ):
UpperCAmelCase_ = nn.Parameter(self.mean.to(lowerCAmelCase ).to(lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(self.std.to(lowerCAmelCase ).to(lowerCAmelCase ) )
return self
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds * self.std) + self.mean
return embeds
| 23 | 1 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = inspect.getfile(accelerate.test_utils )
UpperCAmelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
UpperCAmelCase_ = os.path.sep.join(
mod_file.split(os.path.sep )[:-1] + ["scripts", "test_distributed_data_loop.py"] )
UpperCAmelCase_ = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_ops.py"] )
@require_multi_gpu
def A__ ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.test_file_path]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def A__ ( self ):
print(f'''Found {torch.cuda.device_count()} devices.''' )
UpperCAmelCase_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.operation_file_path]
print(f'''Command: {cmd}''' )
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def A__ ( self ):
UpperCAmelCase_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', inspect.getfile(self.__class__ )]
with patch_environment(omp_num_threads=1 ):
execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() )
@require_multi_gpu
def A__ ( self ):
print(f'''Found {torch.cuda.device_count()} devices, using 2 devices only''' )
UpperCAmelCase_ = ["torchrun", f'''--nproc_per_node={torch.cuda.device_count()}''', self.data_loop_file_path]
with patch_environment(omp_num_threads=1 , cuda_visible_devices="0,1" ):
execute_subprocess_async(lowerCAmelCase , env=os.environ.copy() )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE = (accelerator.state.process_index + 2, 10)
SCREAMING_SNAKE_CASE = torch.randint(0, 10, shape).to(accelerator.device)
SCREAMING_SNAKE_CASE = ""
SCREAMING_SNAKE_CASE = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
SCREAMING_SNAKE_CASE = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
SCREAMING_SNAKE_CASE = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 23 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
if prompt is not None:
UpperCAmelCase_ = prompt
if generate_kwargs is not None:
UpperCAmelCase_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCAmelCase_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
UpperCAmelCase_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowerCAmelCase , **lowerCAmelCase ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = load_image(lowerCAmelCase )
if prompt is not None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
"Note also that one single text can be provided for conditional image to text generation." )
UpperCAmelCase_ = self.model.config.model_type
if model_type == "git":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(text=lowerCAmelCase , add_special_tokens=lowerCAmelCase ).input_ids
UpperCAmelCase_ = [self.tokenizer.cls_token_id] + input_ids
UpperCAmelCase_ = torch.tensor(lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , header_text=lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCAmelCase_ = None
return model_inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , lowerCAmelCase )
and all(x is None for x in model_inputs["input_ids"] )
):
UpperCAmelCase_ = None
if generate_kwargs is None:
UpperCAmelCase_ = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCAmelCase_ = model_inputs.pop(self.model.main_input_name )
UpperCAmelCase_ = self.model.generate(lowerCAmelCase , **lowerCAmelCase , **lowerCAmelCase )
return model_outputs
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
for output_ids in model_outputs:
UpperCAmelCase_ = {
"generated_text": self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , )
}
records.append(lowerCAmelCase )
return records
| 23 | 1 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"tokenizer_file": {
"EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
},
}
SCREAMING_SNAKE_CASE = {
"gpt-neox-20b": 2048,
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : int = VOCAB_FILES_NAMES
lowerCAmelCase_ : Optional[int] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : List[str] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase="<|endoftext|>" , lowerCAmelCase=False , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , lowerCAmelCase , tokenizer_file=lowerCAmelCase , unk_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , add_prefix_space=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("add_prefix_space" , lowerCAmelCase ) != add_prefix_space:
UpperCAmelCase_ = getattr(lowerCAmelCase , pre_tok_state.pop("type" ) )
UpperCAmelCase_ = add_prefix_space
UpperCAmelCase_ = pre_tok_class(**lowerCAmelCase )
UpperCAmelCase_ = add_prefix_space
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase ) + [self.eos_token_id] )
if len(lowerCAmelCase ) > self.model_max_length:
UpperCAmelCase_ = input_ids[-self.model_max_length :]
return input_ids
| 23 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = TextToVideoSDPipeline
lowerCAmelCase_ : Dict = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase_ : Optional[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = TextToVideoSDPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase_ = "np"
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCAmelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A__ ( self ):
pass
def A__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 23 | 1 |
import argparse
import datetime
import json
import time
import warnings
from logging import getLogger
from pathlib import Path
from typing import Dict, List
import torch
from tqdm import tqdm
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
from utils import calculate_bleu, calculate_rouge, chunks, parse_numeric_n_bool_cl_kwargs, use_task_specific_params
SCREAMING_SNAKE_CASE = getLogger(__name__)
SCREAMING_SNAKE_CASE = "cuda" if torch.cuda.is_available() else "cpu"
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 8 , __SCREAMING_SNAKE_CASE = DEFAULT_DEVICE , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE="summarization" , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> Dict:
UpperCAmelCase_ = Path(__SCREAMING_SNAKE_CASE ).open("w" , encoding="utf-8" )
UpperCAmelCase_ = str(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(__SCREAMING_SNAKE_CASE ).to(__SCREAMING_SNAKE_CASE )
if fpaa:
UpperCAmelCase_ = model.half()
UpperCAmelCase_ = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
logger.info(f'''Inferred tokenizer type: {tokenizer.__class__}''' ) # if this is wrong, check config.model_type.
UpperCAmelCase_ = time.time()
# update config with task specific params
use_task_specific_params(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if prefix is None:
UpperCAmelCase_ = prefix or getattr(model.config , "prefix" , "" ) or ""
for examples_chunk in tqdm(list(chunks(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) ):
UpperCAmelCase_ = [prefix + text for text in examples_chunk]
UpperCAmelCase_ = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="pt" , truncation=__SCREAMING_SNAKE_CASE , padding="longest" ).to(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = model.generate(
input_ids=batch.input_ids , attention_mask=batch.attention_mask , **__SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ = tokenizer.batch_decode(__SCREAMING_SNAKE_CASE , skip_special_tokens=__SCREAMING_SNAKE_CASE , clean_up_tokenization_spaces=__SCREAMING_SNAKE_CASE )
for hypothesis in dec:
fout.write(hypothesis + "\n" )
fout.flush()
fout.close()
UpperCAmelCase_ = int(time.time() - start_time ) # seconds
UpperCAmelCase_ = len(__SCREAMING_SNAKE_CASE )
return {"n_obs": n_obs, "runtime": runtime, "seconds_per_sample": round(runtime / n_obs , 4 )}
def snake_case__ ( ) -> Union[str, Any]:
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S" )
def snake_case__ ( __SCREAMING_SNAKE_CASE=True ) -> str:
UpperCAmelCase_ = argparse.ArgumentParser()
parser.add_argument("model_name" , type=__SCREAMING_SNAKE_CASE , help="like facebook/bart-large-cnn,t5-base, etc." )
parser.add_argument("input_path" , type=__SCREAMING_SNAKE_CASE , help="like cnn_dm/test.source" )
parser.add_argument("save_path" , type=__SCREAMING_SNAKE_CASE , help="where to save summaries" )
parser.add_argument("--reference_path" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="like cnn_dm/test.target" )
parser.add_argument("--score_path" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , default="metrics.json" , help="where to save metrics" )
parser.add_argument("--device" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="cuda, cuda:1, cpu etc." )
parser.add_argument(
"--prefix" , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="will be added to the begininng of src examples" )
parser.add_argument("--task" , type=__SCREAMING_SNAKE_CASE , default="summarization" , help="used for task_specific_params + metrics" )
parser.add_argument("--bs" , type=__SCREAMING_SNAKE_CASE , default=8 , required=__SCREAMING_SNAKE_CASE , help="batch size" )
parser.add_argument(
"--n_obs" , type=__SCREAMING_SNAKE_CASE , default=-1 , required=__SCREAMING_SNAKE_CASE , help="How many observations. Defaults to all." )
parser.add_argument("--fp16" , action="store_true" )
parser.add_argument("--dump-args" , action="store_true" , help="print the custom hparams with the results" )
parser.add_argument(
"--info" , nargs="?" , type=__SCREAMING_SNAKE_CASE , const=datetime_now() , help=(
"use in conjunction w/ --dump-args to print with the results whatever other info you'd like, e.g."
" lang=en-ru. If no value is passed, the current datetime string will be used."
) , )
# Unspecified args like --num_beams=2 --decoder_start_token_id=4 are passed to model.generate
UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_known_args()
UpperCAmelCase_ = parse_numeric_n_bool_cl_kwargs(__SCREAMING_SNAKE_CASE )
if parsed_args and verbose:
print(f'''parsed the following generate kwargs: {parsed_args}''' )
UpperCAmelCase_ = [" " + x.rstrip() if "t5" in args.model_name else x.rstrip() for x in open(args.input_path ).readlines()]
if args.n_obs > 0:
UpperCAmelCase_ = examples[: args.n_obs]
Path(args.save_path ).parent.mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
if args.reference_path is None and Path(args.score_path ).exists():
warnings.warn(f'''score_path {args.score_path} will be overwritten unless you type ctrl-c.''' )
if args.device == "cpu" and args.fpaa:
# this mix leads to RuntimeError: "threshold_cpu" not implemented for 'Half'
raise ValueError("Can't mix --fp16 and --device cpu" )
UpperCAmelCase_ = generate_summaries_or_translations(
__SCREAMING_SNAKE_CASE , args.save_path , args.model_name , batch_size=args.bs , device=args.device , fpaa=args.fpaa , task=args.task , prefix=args.prefix , **__SCREAMING_SNAKE_CASE , )
if args.reference_path is None:
return {}
# Compute scores
UpperCAmelCase_ = calculate_bleu if "translation" in args.task else calculate_rouge
UpperCAmelCase_ = [x.rstrip() for x in open(args.save_path ).readlines()]
UpperCAmelCase_ = [x.rstrip() for x in open(args.reference_path ).readlines()][: len(__SCREAMING_SNAKE_CASE )]
UpperCAmelCase_ = score_fn(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
scores.update(__SCREAMING_SNAKE_CASE )
if args.dump_args:
scores.update(__SCREAMING_SNAKE_CASE )
if args.info:
UpperCAmelCase_ = args.info
if verbose:
print(__SCREAMING_SNAKE_CASE )
if args.score_path is not None:
json.dump(__SCREAMING_SNAKE_CASE , open(args.score_path , "w" ) )
return scores
if __name__ == "__main__":
# Usage for MT:
# python run_eval.py MODEL_NAME $DATA_DIR/test.source $save_dir/test_translations.txt --reference_path $DATA_DIR/test.target --score_path $save_dir/test_bleu.json --task translation $@
run_generate(verbose=True)
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = [1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 0, 0, 0
UpperCAmelCase_ = ugly_nums[ia] * 2
UpperCAmelCase_ = ugly_nums[ia] * 3
UpperCAmelCase_ = ugly_nums[ia] * 5
for _ in range(1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
ugly_nums.append(__SCREAMING_SNAKE_CASE )
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 23 | 1 |
from typing import Any, Callable, Dict, List, Optional, Union
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DiffusionPipeline,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionPipeline,
UNetaDConditionModel,
)
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
SCREAMING_SNAKE_CASE = "CompVis/stable-diffusion-v1-1"
SCREAMING_SNAKE_CASE = "CompVis/stable-diffusion-v1-2"
SCREAMING_SNAKE_CASE = "CompVis/stable-diffusion-v1-3"
SCREAMING_SNAKE_CASE = "CompVis/stable-diffusion-v1-4"
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = True , ):
super()._init_()
UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = StableDiffusionPipeline.from_pretrained(lowerCAmelCase )
UpperCAmelCase_ = StableDiffusionPipeline(
vae=lowerCAmelCase , text_encoder=lowerCAmelCase , tokenizer=lowerCAmelCase , unet=lowerCAmelCase , scheduler=lowerCAmelCase , safety_checker=lowerCAmelCase , feature_extractor=lowerCAmelCase , requires_safety_checker=lowerCAmelCase , )
self.register_modules(pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea , pipelinea=self.pipea )
@property
def A__ ( self ):
return {k: getattr(self , lowerCAmelCase ) for k in self.config.keys() if not k.startswith("_" )}
def A__ ( self , lowerCAmelCase = "auto" ):
if slice_size == "auto":
# half the attention head size is usually a good trade-off between
# speed and memory
UpperCAmelCase_ = self.unet.config.attention_head_dim // 2
self.unet.set_attention_slice(lowerCAmelCase )
def A__ ( self ):
self.enable_attention_slicing(lowerCAmelCase )
@torch.no_grad()
def A__ ( self , lowerCAmelCase , lowerCAmelCase = 512 , lowerCAmelCase = 512 , lowerCAmelCase = 50 , lowerCAmelCase = 7.5 , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 0.0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = 1 , **lowerCAmelCase , ):
return self.pipea(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
@torch.no_grad()
def A__ ( self , lowerCAmelCase , lowerCAmelCase = 512 , lowerCAmelCase = 512 , lowerCAmelCase = 50 , lowerCAmelCase = 7.5 , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 0.0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = 1 , **lowerCAmelCase , ):
return self.pipea(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
@torch.no_grad()
def A__ ( self , lowerCAmelCase , lowerCAmelCase = 512 , lowerCAmelCase = 512 , lowerCAmelCase = 50 , lowerCAmelCase = 7.5 , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 0.0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = 1 , **lowerCAmelCase , ):
return self.pipea(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
@torch.no_grad()
def A__ ( self , lowerCAmelCase , lowerCAmelCase = 512 , lowerCAmelCase = 512 , lowerCAmelCase = 50 , lowerCAmelCase = 7.5 , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 0.0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = 1 , **lowerCAmelCase , ):
return self.pipea(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
@torch.no_grad()
def A__ ( self , lowerCAmelCase , lowerCAmelCase = 512 , lowerCAmelCase = 512 , lowerCAmelCase = 50 , lowerCAmelCase = 7.5 , lowerCAmelCase = None , lowerCAmelCase = 1 , lowerCAmelCase = 0.0 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "pil" , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = 1 , **lowerCAmelCase , ):
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
self.to(lowerCAmelCase )
# Checks if the height and width are divisible by 8 or not
if height % 8 != 0 or width % 8 != 0:
raise ValueError(f'''`height` and `width` must be divisible by 8 but are {height} and {width}.''' )
# Get first result from Stable Diffusion Checkpoint v1.1
UpperCAmelCase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.2
UpperCAmelCase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.3
UpperCAmelCase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
# Get first result from Stable Diffusion Checkpoint v1.4
UpperCAmelCase_ = self.textaimg_sda_a(
prompt=lowerCAmelCase , height=lowerCAmelCase , width=lowerCAmelCase , num_inference_steps=lowerCAmelCase , guidance_scale=lowerCAmelCase , negative_prompt=lowerCAmelCase , num_images_per_prompt=lowerCAmelCase , eta=lowerCAmelCase , generator=lowerCAmelCase , latents=lowerCAmelCase , output_type=lowerCAmelCase , return_dict=lowerCAmelCase , callback=lowerCAmelCase , callback_steps=lowerCAmelCase , **lowerCAmelCase , )
# Get all result images into a single list and pass it via StableDiffusionPipelineOutput for final result
return StableDiffusionPipelineOutput([resa[0], resa[0], resa[0], resa[0]] )
| 23 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=224 , lowerCAmelCase=1000 , lowerCAmelCase=[3, 3, 6, 4] , lowerCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = layer_depths
UpperCAmelCase_ = embed_dims
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1e-5 , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ):
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : int = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Optional[Any] = False
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
def _config_zero_init(lowerCAmelCase ):
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1e-1_0 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
UpperCAmelCase_ = _config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
if index == number_of_items:
return 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = knapsack(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , index + 1 )
if weights[index] <= max_weight:
UpperCAmelCase_ = values[index] + knapsack(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_weight - weights[index] , index + 1 )
return max(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
from dataclasses import dataclass
from typing import List, Optional, Union
import numpy as np
import PIL
from ...utils import BaseOutput, OptionalDependencyNotAvailable, is_torch_available, is_transformers_available
from .timesteps import (
fastaa_timesteps,
smartaa_timesteps,
smartaa_timesteps,
smartaaa_timesteps,
smartaaa_timesteps,
superaa_timesteps,
superaa_timesteps,
superaaa_timesteps,
)
@dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Union[List[PIL.Image.Image], np.ndarray]
lowerCAmelCase_ : Optional[List[bool]]
lowerCAmelCase_ : Optional[List[bool]]
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .pipeline_if import IFPipeline
from .pipeline_if_imgaimg import IFImgaImgPipeline
from .pipeline_if_imgaimg_superresolution import IFImgaImgSuperResolutionPipeline
from .pipeline_if_inpainting import IFInpaintingPipeline
from .pipeline_if_inpainting_superresolution import IFInpaintingSuperResolutionPipeline
from .pipeline_if_superresolution import IFSuperResolutionPipeline
from .safety_checker import IFSafetyChecker
from .watermark import IFWatermarker
| 23 |
import math
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]:
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) ) # Size of every segment
UpperCAmelCase_ = [True] * (end + 1)
UpperCAmelCase_ = []
while start <= end:
if temp[start] is True:
in_prime.append(__SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
start += 1
prime += in_prime
UpperCAmelCase_ = end + 1
UpperCAmelCase_ = min(2 * end , __SCREAMING_SNAKE_CASE )
while low <= n:
UpperCAmelCase_ = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase_ = math.floor(low / each ) * each
if t < low:
t += each
for j in range(__SCREAMING_SNAKE_CASE , high + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase_ = high + 1
UpperCAmelCase_ = min(high + end , __SCREAMING_SNAKE_CASE )
return prime
print(sieve(10**6))
| 23 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : torch.FloatTensor
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ):
UpperCAmelCase_ = {}
def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return processors
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
self.set_attn_processor(AttnProcessor() )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 23 | 1 |
from __future__ import annotations
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ , UpperCAmelCase_ = text, pattern
UpperCAmelCase_ , UpperCAmelCase_ = len(lowerCAmelCase ), len(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if char == self.pattern[i]:
return i
return -1
def A__ ( self , lowerCAmelCase ):
for i in range(self.patLen - 1 , -1 , -1 ):
if self.pattern[i] != self.text[current_pos + i]:
return current_pos + i
return -1
def A__ ( self ):
# searches pattern in text and returns index positions
UpperCAmelCase_ = []
for i in range(self.textLen - self.patLen + 1 ):
UpperCAmelCase_ = self.mismatch_in_text(lowerCAmelCase )
if mismatch_index == -1:
positions.append(lowerCAmelCase )
else:
UpperCAmelCase_ = self.match_in_pattern(self.text[mismatch_index] )
UpperCAmelCase_ = (
mismatch_index - match_index
) # shifting index lgtm [py/multiple-definition]
return positions
SCREAMING_SNAKE_CASE = "ABAABA"
SCREAMING_SNAKE_CASE = "AB"
SCREAMING_SNAKE_CASE = BoyerMooreSearch(text, pattern)
SCREAMING_SNAKE_CASE = bms.bad_character_heuristic()
if len(positions) == 0:
print("No match found")
else:
print("Pattern found in following positions: ")
print(positions)
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = abs(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = 0
while n > 0:
res += n % 10
n //= 10
return res
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = abs(__SCREAMING_SNAKE_CASE )
return n if n < 10 else n % 10 + sum_of_digits(n // 10 )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
return sum(int(__SCREAMING_SNAKE_CASE ) for c in str(abs(__SCREAMING_SNAKE_CASE ) ) )
def snake_case__ ( ) -> None:
from collections.abc import Callable
from timeit import timeit
def benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
UpperCAmelCase_ = f'''{func.__name__}({value})'''
UpperCAmelCase_ = timeit(f'''__main__.{call}''' , setup="import __main__" )
print(f'''{call:56} = {func(__SCREAMING_SNAKE_CASE )} -- {timing:.4f} seconds''' )
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = 'xlm-roberta'
def __init__( self , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 23 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCAmelCase_ = "-" if number.startswith("-" ) else ""
UpperCAmelCase_ = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 23 | 1 |
import requests
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
UpperCAmelCase_ = {"Content-Type": "application/json"}
UpperCAmelCase_ = requests.post(__SCREAMING_SNAKE_CASE , json={"text": message_body} , headers=__SCREAMING_SNAKE_CASE )
if response.status_code != 200:
UpperCAmelCase_ = (
"Request to slack returned an error "
f'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 23 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase ).to(lowerCAmelCase )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = model(input_ids.to(lowerCAmelCase ) , labels=labels.to(lowerCAmelCase ) ).loss
UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 23 | 1 |
import warnings
from pathlib import Path
from typing import List, Tuple, Union
import fire
from torch import nn
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer, PreTrainedModel
from transformers.utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
UpperCAmelCase_ = nn.ModuleList([src_layers[i] for i in layers_to_copy] )
assert len(__SCREAMING_SNAKE_CASE ) == len(__SCREAMING_SNAKE_CASE ), f'''{len(__SCREAMING_SNAKE_CASE )} != {len(__SCREAMING_SNAKE_CASE )}'''
dest_layers.load_state_dict(layers_to_copy.state_dict() )
SCREAMING_SNAKE_CASE = {
# maps num layers in teacher -> num_layers in student -> which teacher layers to copy.
# 12: bart, 16: pegasus, 6: marian/Helsinki-NLP
12: {
1: [0], # This says that if the teacher has 12 layers and the student has 1, copy layer 0 of the teacher
2: [0, 6],
3: [0, 6, 11],
4: [0, 4, 8, 11],
6: [0, 2, 4, 7, 9, 11],
9: [0, 1, 2, 4, 5, 7, 9, 10, 11],
12: list(range(12)),
},
16: { # maps num layers in student -> which teacher layers to copy
1: [0],
2: [0, 15],
3: [0, 8, 15],
4: [0, 5, 10, 15],
6: [0, 3, 6, 9, 12, 15],
8: [0, 2, 4, 6, 8, 10, 12, 15],
9: [0, 1, 3, 5, 7, 9, 11, 13, 15],
12: [0, 1, 2, 3, 4, 5, 6, 7, 9, 11, 13, 15],
16: list(range(16)),
},
6: {1: [0], 2: [0, 5], 3: [0, 2, 5], 4: [0, 1, 3, 5], 6: list(range(6))},
}
SCREAMING_SNAKE_CASE = {
# maps num layers in student -> which teacher layers to copy.
6: {1: [5], 2: [3, 5], 3: [1, 4, 5], 4: [1, 2, 4, 5]},
12: {1: [11], 2: [5, 11], 3: [3, 7, 11], 6: [1, 3, 5, 8, 10, 11]},
16: {1: [15], 4: [4, 9, 12, 15], 8: [1, 3, 5, 7, 9, 11, 13, 15]},
}
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
try:
UpperCAmelCase_ = LAYERS_TO_COPY[n_teacher][n_student]
return val
except KeyError:
if n_student != n_teacher:
warnings.warn(
f'''no hardcoded layers to copy for teacher {n_teacher} -> student {n_student}, defaulting to first'''
f''' {n_student}''' )
return list(range(__SCREAMING_SNAKE_CASE ) )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[int]:
if n_student > n_teacher:
raise ValueError(f'''Cannot perform intermediate supervision for student {n_student} > teacher {n_teacher}''' )
elif n_teacher == n_student:
return list(range(__SCREAMING_SNAKE_CASE ) )
elif n_student == 1:
return [n_teacher - 1]
else:
return LAYERS_TO_SUPERVISE[n_teacher][n_student]
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "student" , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE=False , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , **__SCREAMING_SNAKE_CASE , ) -> Tuple[PreTrainedModel, List[int], List[int]]:
UpperCAmelCase_ = "encoder_layers and decoder_layers cannot be both None-- you would just have an identical teacher."
assert (e is not None) or (d is not None), _msg
if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE ).save_pretrained(__SCREAMING_SNAKE_CASE ) # purely for convenience
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(__SCREAMING_SNAKE_CASE ).eval()
else:
assert isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), f'''teacher must be a model or string got type {type(__SCREAMING_SNAKE_CASE )}'''
UpperCAmelCase_ = teacher.config.to_diff_dict()
try:
UpperCAmelCase_ , UpperCAmelCase_ = teacher.config.encoder_layers, teacher.config.decoder_layers
if e is None:
UpperCAmelCase_ = teacher_e
if d is None:
UpperCAmelCase_ = teacher_d
init_kwargs.update({"encoder_layers": e, "decoder_layers": d} )
except AttributeError: # T5
if hasattr(teacher.config , "num_encoder_layers" ):
UpperCAmelCase_ , UpperCAmelCase_ = teacher.config.num_encoder_layers, teacher.config.num_decoder_layers
else:
UpperCAmelCase_ , UpperCAmelCase_ = teacher.config.num_layers, teacher.config.num_decoder_layers
if e is None:
UpperCAmelCase_ = teacher_e
if d is None:
UpperCAmelCase_ = teacher_d
if hasattr(teacher.config , "num_encoder_layers" ):
init_kwargs.update({"num_encoder_layers": e, "num_decoder_layers": d} )
else:
init_kwargs.update({"num_layers": e, "num_decoder_layers": d} )
# Kwargs to instantiate student: teacher kwargs with updated layer numbers + **extra_config_kwargs
init_kwargs.update(__SCREAMING_SNAKE_CASE )
# Copy weights
UpperCAmelCase_ = teacher.config_class(**__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_config(__SCREAMING_SNAKE_CASE )
# Start by copying the full teacher state dict this will copy the first N teacher layers to the student.
UpperCAmelCase_ = student.load_state_dict(teacher.state_dict() , strict=__SCREAMING_SNAKE_CASE )
assert info.missing_keys == [], info.missing_keys # every student key should have a teacher keys.
if copy_first_teacher_layers: # Our copying is done. We just log and save
UpperCAmelCase_ , UpperCAmelCase_ = list(range(__SCREAMING_SNAKE_CASE ) ), list(range(__SCREAMING_SNAKE_CASE ) )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to'''
f''' {save_path}''' )
student.save_pretrained(__SCREAMING_SNAKE_CASE )
return student, e_layers_to_copy, d_layers_to_copy
# Decide which layers of the teacher to copy. Not exactly alternating -- we try to keep first and last layer.
if e_layers_to_copy is None:
UpperCAmelCase_ = pick_layers_to_copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if d_layers_to_copy is None:
UpperCAmelCase_ = pick_layers_to_copy(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
try:
if hasattr(
__SCREAMING_SNAKE_CASE , "prophetnet" ): # For ProphetNet, student.model.encoder.layers is called student.prophetnet.encoder.layers
copy_layers(teacher.prophetnet.encoder.layers , student.prophetnet.encoder.layers , __SCREAMING_SNAKE_CASE )
copy_layers(teacher.prophetnet.decoder.layers , student.prophetnet.decoder.layers , __SCREAMING_SNAKE_CASE )
else:
copy_layers(teacher.model.encoder.layers , student.model.encoder.layers , __SCREAMING_SNAKE_CASE )
copy_layers(teacher.model.decoder.layers , student.model.decoder.layers , __SCREAMING_SNAKE_CASE )
except AttributeError: # For t5, student.model.encoder.layers is called student.encoder.block
copy_layers(teacher.encoder.block , student.encoder.block , __SCREAMING_SNAKE_CASE )
copy_layers(teacher.decoder.block , student.decoder.block , __SCREAMING_SNAKE_CASE )
logger.info(
f'''Copied encoder layers {e_layers_to_copy} and decoder layers {d_layers_to_copy}. Saving them to {save_path}''' )
UpperCAmelCase_ = {
"teacher_type": teacher.config.model_type,
"copied_encoder_layers": e_layers_to_copy,
"copied_decoder_layers": d_layers_to_copy,
}
student.save_pretrained(__SCREAMING_SNAKE_CASE )
# Save information about copying for easier reproducibility
return student, e_layers_to_copy, d_layers_to_copy
if __name__ == "__main__":
fire.Fire(create_student_by_copying_alternating_layers)
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
UpperCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 23 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
if is_vision_available():
import PIL
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BICUBIC , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
UpperCAmelCase_ = image_std if image_std is not None else OPENAI_CLIP_STD
UpperCAmelCase_ = do_convert_rgb
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BICUBIC , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size=size["shortest_edge"] , default_to_square=lowerCAmelCase )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''The `size` parameter must contain the keys (height, width). Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="size" , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
UpperCAmelCase_ = make_list_of_images(lowerCAmelCase )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
UpperCAmelCase_ = [convert_to_rgb(lowerCAmelCase ) for image in images]
# All transformations expect numpy arrays.
UpperCAmelCase_ = [to_numpy_array(lowerCAmelCase ) for image in images]
if do_resize:
UpperCAmelCase_ = [self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase ) for image in images]
if do_center_crop:
UpperCAmelCase_ = [self.center_crop(image=lowerCAmelCase , size=lowerCAmelCase ) for image in images]
if do_rescale:
UpperCAmelCase_ = [self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase ) for image in images]
if do_normalize:
UpperCAmelCase_ = [self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase ) for image in images]
UpperCAmelCase_ = [to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase ) for image in images]
UpperCAmelCase_ = {"pixel_values": images}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
| 23 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
| 23 | 1 |
from __future__ import annotations
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 0
while number > 0:
UpperCAmelCase_ = number % 10
sum_of_digits += last_digit
UpperCAmelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case__ ( __SCREAMING_SNAKE_CASE = 100 ) -> int:
UpperCAmelCase_ = factorial(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 23 | 1 |
import gc
import importlib.metadata
import tempfile
import unittest
from packaging import version
from transformers import (
AutoModel,
AutoModelForCausalLM,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoTokenizer,
BitsAndBytesConfig,
pipeline,
)
from transformers.testing_utils import (
is_torch_available,
require_accelerate,
require_bitsandbytes,
require_torch,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Any:
if model.config.model_type == "gpt2":
return model.transformer.h[0].mlp.c_fc
return model.transformer.h[0].mlp.dense_ah_to_h
if is_torch_available():
import torch
import torch.nn as nn
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
super().__init__()
UpperCAmelCase_ = module
UpperCAmelCase_ = nn.Sequential(
nn.Linear(module.in_features , lowerCAmelCase , bias=lowerCAmelCase ) , nn.Linear(lowerCAmelCase , module.out_features , bias=lowerCAmelCase ) , )
UpperCAmelCase_ = (2.0 / (5 * min(module.in_features , module.out_features ))) ** 0.5
nn.init.normal_(self.adapter[0].weight , std=lowerCAmelCase )
nn.init.zeros_(self.adapter[1].weight )
self.adapter.to(module.weight.device )
def A__ ( self , lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ):
return self.module(lowerCAmelCase , *lowerCAmelCase , **lowerCAmelCase ) + self.adapter(lowerCAmelCase )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Any = 'bigscience/bloom-1b7'
# Constant values
lowerCAmelCase_ : Union[str, Any] = 2.1_0_9_6_5_9_5_5_2_6_9_2_5_7_4
lowerCAmelCase_ : List[Any] = 'Hello my name is'
lowerCAmelCase_ : List[str] = set()
EXPECTED_OUTPUTS.add('Hello my name is John and I am a professional photographer. I' )
EXPECTED_OUTPUTS.add('Hello my name is John.\nI am a friend of your father.\n' )
EXPECTED_OUTPUTS.add('Hello my name is John Doe, I am a student at the University' )
lowerCAmelCase_ : Any = 10
def A__ ( self ):
# Models and tokenizer
UpperCAmelCase_ = AutoTokenizer.from_pretrained(self.model_name )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self ):
super().setUp()
# Models and tokenizer
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(
self.model_name , torch_dtype=torch.floataa , device_map="auto" )
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map="auto" )
def A__ ( self ):
del self.model_fpaa
del self.model_abit
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
UpperCAmelCase_ = self.model_abit.config
self.assertTrue(hasattr(lowerCAmelCase , "quantization_config" ) )
UpperCAmelCase_ = config.to_dict()
UpperCAmelCase_ = config.to_diff_dict()
UpperCAmelCase_ = config.to_json_string()
def A__ ( self ):
from bitsandbytes.nn import Paramsabit
UpperCAmelCase_ = self.model_fpaa.get_memory_footprint()
UpperCAmelCase_ = self.model_abit.get_memory_footprint()
self.assertAlmostEqual(mem_fpaa / mem_abit , self.EXPECTED_RELATIVE_DIFFERENCE )
UpperCAmelCase_ = get_some_linear_layer(self.model_abit )
self.assertTrue(linear.weight.__class__ == Paramsabit )
def A__ ( self ):
from transformers import TaPreTrainedModel
self.model_fpaa.get_memory_footprint()
self.model_abit.get_memory_footprint()
for name, module in self.model_abit.named_modules():
if isinstance(lowerCAmelCase , torch.nn.Linear ):
if name not in ["lm_head"] + TaPreTrainedModel._keep_in_fpaa_modules:
# 4-bit parameters are packed in uint8 variables
self.assertTrue(module.weight.dtype == torch.uinta )
def A__ ( self ):
UpperCAmelCase_ = self.tokenizer(self.input_text , return_tensors="pt" )
UpperCAmelCase_ = self.model_abit.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase ) , self.EXPECTED_OUTPUTS )
def A__ ( self ):
UpperCAmelCase_ = BitsAndBytesConfig()
UpperCAmelCase_ = True
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase , device_map="auto" )
UpperCAmelCase_ = self.tokenizer(self.input_text , return_tensors="pt" )
UpperCAmelCase_ = model_abit_from_config.generate(
input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_sequences[0] , skip_special_tokens=lowerCAmelCase ) , self.EXPECTED_OUTPUTS )
def A__ ( self ):
with self.assertRaises(lowerCAmelCase ), tempfile.TemporaryDirectory() as tmpdirname:
self.model_abit.save_pretrained(lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = BitsAndBytesConfig()
with self.assertRaises(lowerCAmelCase ):
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(
self.model_name , quantization_config=lowerCAmelCase , load_in_abit=lowerCAmelCase , device_map="auto" , bnb_abit_quant_type="nf4" , )
def A__ ( self ):
with self.assertRaises(lowerCAmelCase ):
# Tries with `str`
self.model_abit.to("cpu" )
with self.assertRaises(lowerCAmelCase ):
# Tries with a `dtype``
self.model_abit.to(torch.floataa )
with self.assertRaises(lowerCAmelCase ):
# Tries with a `device`
self.model_abit.to(torch.device("cuda:0" ) )
with self.assertRaises(lowerCAmelCase ):
# Tries with a `device`
self.model_abit.float()
with self.assertRaises(lowerCAmelCase ):
# Tries with a `device`
self.model_abit.half()
# Test if we did not break anything
UpperCAmelCase_ = self.tokenizer(self.input_text , return_tensors="pt" )
UpperCAmelCase_ = self.model_fpaa.to(torch.floataa )
UpperCAmelCase_ = self.model_fpaa.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
# Check this does not throw an error
UpperCAmelCase_ = self.model_fpaa.to("cpu" )
# Check this does not throw an error
UpperCAmelCase_ = self.model_fpaa.half()
# Check this does not throw an error
UpperCAmelCase_ = self.model_fpaa.float()
def A__ ( self ):
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained("t5-small" , load_in_abit=lowerCAmelCase , device_map="auto" )
self.assertTrue(model.decoder.block[0].layer[2].DenseReluDense.wo.weight.dtype == torch.floataa )
@require_bitsandbytes
@require_accelerate
@require_torch
@require_torch_gpu
@slow
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def A__ ( cls ):
UpperCAmelCase_ = "t5-small"
UpperCAmelCase_ = "google/flan-t5-small" # flan-t5 uses dense-act instead of dense-relu-dense
UpperCAmelCase_ = AutoTokenizer.from_pretrained(cls.model_name )
UpperCAmelCase_ = "Translate in German: Hello, my dog is cute"
def A__ ( self ):
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
from transformers import TaForConditionalGeneration
UpperCAmelCase_ = TaForConditionalGeneration._keep_in_fpaa_modules
UpperCAmelCase_ = None
# test with `t5-small`
UpperCAmelCase_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map="auto" )
UpperCAmelCase_ = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
UpperCAmelCase_ = model.generate(**lowerCAmelCase )
# test with `flan-t5-small`
UpperCAmelCase_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase , device_map="auto" )
UpperCAmelCase_ = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
UpperCAmelCase_ = model.generate(**lowerCAmelCase )
UpperCAmelCase_ = modules
def A__ ( self ):
import bitsandbytes as bnb
from transformers import TaForConditionalGeneration
# test with `t5-small`
UpperCAmelCase_ = TaForConditionalGeneration.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map="auto" )
# there was a bug with decoders - this test checks that it is fixed
self.assertTrue(isinstance(model.decoder.block[0].layer[0].SelfAttention.q , bnb.nn.Linearabit ) )
UpperCAmelCase_ = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
UpperCAmelCase_ = model.generate(**lowerCAmelCase )
# test with `flan-t5-small`
UpperCAmelCase_ = TaForConditionalGeneration.from_pretrained(
self.dense_act_model_name , load_in_abit=lowerCAmelCase , device_map="auto" )
UpperCAmelCase_ = self.tokenizer(self.input_text , return_tensors="pt" ).to(0 )
UpperCAmelCase_ = model.generate(**lowerCAmelCase )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self ):
super().setUp()
# model_name
UpperCAmelCase_ = "bigscience/bloom-560m"
UpperCAmelCase_ = "t5-small"
# Different types of model
UpperCAmelCase_ = AutoModel.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map="auto" )
# Sequence classification model
UpperCAmelCase_ = AutoModelForSequenceClassification.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase , device_map="auto" )
# CausalLM model
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase , device_map="auto" )
# Seq2seq model
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained(
self.seq_to_seq_name , load_in_abit=lowerCAmelCase , device_map="auto" )
def A__ ( self ):
del self.base_model
del self.sequence_model
del self.model_abit
del self.seq_to_seq_model
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
from bitsandbytes.nn import Paramsabit
self.assertTrue(self.base_model.h[-1].mlp.dense_ah_to_h.weight.__class__ == Paramsabit )
# Other heads should be nn.Parameter
self.assertTrue(self.model_abit.lm_head.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.sequence_model.score.weight.__class__ == torch.nn.Parameter )
self.assertTrue(self.seq_to_seq_model.lm_head.weight.__class__ == torch.nn.Parameter )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self ):
super().setUp()
def A__ ( self ):
del self.pipe
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
UpperCAmelCase_ = pipeline(
"text-generation" , model=self.model_name , model_kwargs={"device_map": "auto", "load_in_4bit": True, "torch_dtype": torch.floataa} , max_new_tokens=self.MAX_NEW_TOKENS , )
# Real second forward pass
UpperCAmelCase_ = self.pipe(self.input_text )
self.assertIn(pipeline_output[0]["generated_text"] , self.EXPECTED_OUTPUTS )
@require_torch_multi_gpu
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self ):
super().setUp()
def A__ ( self ):
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(
self.model_name , load_in_abit=lowerCAmelCase , device_map="balanced" )
# Check correct device map
self.assertEqual(set(model_parallel.hf_device_map.values() ) , {0, 1} )
# Check that inference pass works on the model
UpperCAmelCase_ = self.tokenizer(self.input_text , return_tensors="pt" )
# Second real batch
UpperCAmelCase_ = model_parallel.generate(input_ids=encoded_input["input_ids"].to(0 ) , max_new_tokens=10 )
self.assertIn(self.tokenizer.decode(output_parallel[0] , skip_special_tokens=lowerCAmelCase ) , self.EXPECTED_OUTPUTS )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = "facebook/opt-350m"
super().setUp()
def A__ ( self ):
if version.parse(importlib.metadata.version("bitsandbytes" ) ) < version.parse("0.37.0" ):
return
# Step 1: freeze all parameters
UpperCAmelCase_ = AutoModelForCausalLM.from_pretrained(self.model_name , load_in_abit=lowerCAmelCase )
self.assertEqual(set(model.hf_device_map.values() ) , {torch.cuda.current_device()} )
for param in model.parameters():
UpperCAmelCase_ = False # freeze the model - train adapters later
if param.ndim == 1:
# cast the small parameters (e.g. layernorm) to fp32 for stability
UpperCAmelCase_ = param.data.to(torch.floataa )
# Step 2: add adapters
for _, module in model.named_modules():
if "OPTAttention" in repr(type(lowerCAmelCase ) ):
UpperCAmelCase_ = LoRALayer(module.q_proj , rank=16 )
UpperCAmelCase_ = LoRALayer(module.k_proj , rank=16 )
UpperCAmelCase_ = LoRALayer(module.v_proj , rank=16 )
# Step 3: dummy batch
UpperCAmelCase_ = self.tokenizer("Test batch " , return_tensors="pt" ).to(0 )
# Step 4: Check if the gradient is not None
with torch.cuda.amp.autocast():
UpperCAmelCase_ = model.forward(**lowerCAmelCase )
out.logits.norm().backward()
for module in model.modules():
if isinstance(lowerCAmelCase , lowerCAmelCase ):
self.assertTrue(module.adapter[1].weight.grad is not None )
self.assertTrue(module.adapter[1].weight.grad.norm().item() > 0 )
elif isinstance(lowerCAmelCase , nn.Embedding ):
self.assertTrue(module.weight.grad is None )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Dict = 'gpt2-xl'
lowerCAmelCase_ : int = 3.3_1_9_1_8_5_4_8_5_4_1_5_2_1_8_7
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
# We ignore warnings about stepping the scheduler since we step it ourselves during gradient accumulation
import warnings
from .state import AcceleratorState, GradientState
warnings.filterwarnings("ignore", category=UserWarning, module="torch.optim.lr_scheduler")
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = True , lowerCAmelCase = False ):
UpperCAmelCase_ = scheduler
UpperCAmelCase_ = optimizers if isinstance(lowerCAmelCase , (list, tuple) ) else [optimizers]
UpperCAmelCase_ = split_batches
UpperCAmelCase_ = step_with_optimizer
UpperCAmelCase_ = GradientState()
def A__ ( self , *lowerCAmelCase , **lowerCAmelCase ):
if not self.step_with_optimizer:
# No link between scheduler and optimizer -> just step
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
return
# Otherwise, first make sure the optimizer was stepped.
if not self.gradient_state.sync_gradients:
if self.gradient_state.adjust_scheduler:
self.scheduler._step_count += 1
return
for opt in self.optimizers:
if opt.step_was_skipped:
return
if self.split_batches:
# Split batches -> the training dataloader batch size is not changed so one step per training step
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
else:
# Otherwise the training dataloader batch size was multiplied by `num_processes`, so we need to do
# num_processes steps per training step
UpperCAmelCase_ = AcceleratorState().num_processes
for _ in range(lowerCAmelCase ):
# Special case when using OneCycle and `drop_last` was not used
if hasattr(self.scheduler , "total_steps" ):
if self.scheduler._step_count <= self.scheduler.total_steps:
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
else:
self.scheduler.step(*lowerCAmelCase , **lowerCAmelCase )
def A__ ( self ):
return self.scheduler.get_last_lr()
def A__ ( self ):
return self.scheduler.state_dict()
def A__ ( self , lowerCAmelCase ):
self.scheduler.load_state_dict(lowerCAmelCase )
def A__ ( self ):
return self.scheduler.get_lr()
def A__ ( self , *lowerCAmelCase , **lowerCAmelCase ):
return self.scheduler.print_lr(*lowerCAmelCase , **lowerCAmelCase )
| 23 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 23 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"microsoft/cvt-13": "https://huggingface.co/microsoft/cvt-13/resolve/main/config.json",
# See all Cvt models at https://huggingface.co/models?filter=cvt
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = 'cvt'
def __init__( self , lowerCAmelCase=3 , lowerCAmelCase=[7, 3, 3] , lowerCAmelCase=[4, 2, 2] , lowerCAmelCase=[2, 1, 1] , lowerCAmelCase=[64, 192, 384] , lowerCAmelCase=[1, 3, 6] , lowerCAmelCase=[1, 2, 10] , lowerCAmelCase=[4.0, 4.0, 4.0] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=[0.0, 0.0, 0.0] , lowerCAmelCase=[0.0, 0.0, 0.1] , lowerCAmelCase=[True, True, True] , lowerCAmelCase=[False, False, True] , lowerCAmelCase=["dw_bn", "dw_bn", "dw_bn"] , lowerCAmelCase=[3, 3, 3] , lowerCAmelCase=[1, 1, 1] , lowerCAmelCase=[2, 2, 2] , lowerCAmelCase=[1, 1, 1] , lowerCAmelCase=[1, 1, 1] , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = patch_sizes
UpperCAmelCase_ = patch_stride
UpperCAmelCase_ = patch_padding
UpperCAmelCase_ = embed_dim
UpperCAmelCase_ = num_heads
UpperCAmelCase_ = depth
UpperCAmelCase_ = mlp_ratio
UpperCAmelCase_ = attention_drop_rate
UpperCAmelCase_ = drop_rate
UpperCAmelCase_ = drop_path_rate
UpperCAmelCase_ = qkv_bias
UpperCAmelCase_ = cls_token
UpperCAmelCase_ = qkv_projection_method
UpperCAmelCase_ = kernel_qkv
UpperCAmelCase_ = padding_kv
UpperCAmelCase_ = stride_kv
UpperCAmelCase_ = padding_q
UpperCAmelCase_ = stride_q
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
| 23 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = str(id_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = []
UpperCAmelCase_ = {} # {vertex:distance}
def __lt__( self , lowerCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def A__ ( self , lowerCAmelCase ):
self.neighbors.append(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = weight
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list:
UpperCAmelCase_ = []
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = graph[:]
while q:
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE )
q.remove(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Iterator[tuple]:
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
hq.heapify(__SCREAMING_SNAKE_CASE )
while h:
UpperCAmelCase_ = hq.heappop(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
hq.heapify(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def snake_case__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 1 |
from typing import Optional, Tuple
import jax
import jax.numpy as jnp
from flax import linen as nn
from flax.core.frozen_dict import FrozenDict
from transformers import CLIPConfig, FlaxPreTrainedModel
from transformers.models.clip.modeling_flax_clip import FlaxCLIPVisionModule
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=1E-12 ) -> List[Any]:
UpperCAmelCase_ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__SCREAMING_SNAKE_CASE , axis=1 ) , a_min=__SCREAMING_SNAKE_CASE ) ).T
UpperCAmelCase_ = jnp.divide(emb_a.T , jnp.clip(jnp.linalg.norm(__SCREAMING_SNAKE_CASE , axis=1 ) , a_min=__SCREAMING_SNAKE_CASE ) ).T
return jnp.matmul(__SCREAMING_SNAKE_CASE , norm_emb_a.T )
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
lowerCAmelCase_ : CLIPConfig
lowerCAmelCase_ : jnp.dtype = jnp.floataa
def A__ ( self ):
UpperCAmelCase_ = FlaxCLIPVisionModule(self.config.vision_config )
UpperCAmelCase_ = nn.Dense(self.config.projection_dim , use_bias=lowerCAmelCase , dtype=self.dtype )
UpperCAmelCase_ = self.param("concept_embeds" , jax.nn.initializers.ones , (17, self.config.projection_dim) )
UpperCAmelCase_ = self.param(
"special_care_embeds" , jax.nn.initializers.ones , (3, self.config.projection_dim) )
UpperCAmelCase_ = self.param("concept_embeds_weights" , jax.nn.initializers.ones , (17,) )
UpperCAmelCase_ = self.param("special_care_embeds_weights" , jax.nn.initializers.ones , (3,) )
def __call__( self , lowerCAmelCase ):
UpperCAmelCase_ = self.vision_model(lowerCAmelCase )[1]
UpperCAmelCase_ = self.visual_projection(lowerCAmelCase )
UpperCAmelCase_ = jax_cosine_distance(lowerCAmelCase , self.special_care_embeds )
UpperCAmelCase_ = jax_cosine_distance(lowerCAmelCase , self.concept_embeds )
# increase this value to create a stronger `nfsw` filter
# at the cost of increasing the possibility of filtering benign image inputs
UpperCAmelCase_ = 0.0
UpperCAmelCase_ = special_cos_dist - self.special_care_embeds_weights[None, :] + adjustment
UpperCAmelCase_ = jnp.round(lowerCAmelCase , 3 )
UpperCAmelCase_ = jnp.any(special_scores > 0 , axis=1 , keepdims=lowerCAmelCase )
# Use a lower threshold if an image has any special care concept
UpperCAmelCase_ = is_special_care * 0.01
UpperCAmelCase_ = cos_dist - self.concept_embeds_weights[None, :] + special_adjustment
UpperCAmelCase_ = jnp.round(lowerCAmelCase , 3 )
UpperCAmelCase_ = jnp.any(concept_scores > 0 , axis=1 )
return has_nsfw_concepts
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = CLIPConfig
lowerCAmelCase_ : int = 'clip_input'
lowerCAmelCase_ : Tuple = FlaxStableDiffusionSafetyCheckerModule
def __init__( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = 0 , lowerCAmelCase = jnp.floataa , lowerCAmelCase = True , **lowerCAmelCase , ):
if input_shape is None:
UpperCAmelCase_ = (1, 224, 224, 3)
UpperCAmelCase_ = self.module_class(config=lowerCAmelCase , dtype=lowerCAmelCase , **lowerCAmelCase )
super().__init__(lowerCAmelCase , lowerCAmelCase , input_shape=lowerCAmelCase , seed=lowerCAmelCase , dtype=lowerCAmelCase , _do_init=_do_init )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None ):
# init input tensor
UpperCAmelCase_ = jax.random.normal(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = jax.random.split(lowerCAmelCase )
UpperCAmelCase_ = {"params": params_rng, "dropout": dropout_rng}
UpperCAmelCase_ = self.module.init(lowerCAmelCase , lowerCAmelCase )["params"]
return random_params
def __call__( self , lowerCAmelCase , lowerCAmelCase = None , ):
UpperCAmelCase_ = jnp.transpose(lowerCAmelCase , (0, 2, 3, 1) )
return self.module.apply(
{"params": params or self.params} , jnp.array(lowerCAmelCase , dtype=jnp.floataa ) , rngs={} , )
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece_bpe.model")
SCREAMING_SNAKE_CASE = "pt" if is_torch_available() else "tf"
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = CamembertTokenizer
lowerCAmelCase_ : int = CamembertTokenizerFast
lowerCAmelCase_ : Tuple = True
lowerCAmelCase_ : Union[str, Any] = True
def A__ ( self ):
super().setUp()
# We have a SentencePiece fixture for testing
UpperCAmelCase_ = CamembertTokenizer(lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self ):
UpperCAmelCase_ = "<pad>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
self.assertEqual(vocab_keys[1] , "<pad>" )
self.assertEqual(vocab_keys[-1] , "<mask>" )
self.assertEqual(len(lowerCAmelCase ) , 1004 )
def A__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1005 )
def A__ ( self ):
UpperCAmelCase_ = CamembertTokenizer(lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
UpperCAmelCase_ = CamembertTokenizerFast.from_pretrained(self.tmpdirname )
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
# <unk> tokens are not the same for `rust` than for `slow`.
# Because spm gives back raw token instead of `unk` in EncodeAsPieces
# tokens = tokenizer.tokenize(sequence)
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@slow
def A__ ( self ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 2_7575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 2_2804, 1_8818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 1_0326, 24, 2267, 20, 416, 5072, 1_5612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
UpperCAmelCase_ = [
"Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
"utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
"À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
"pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
"telles que la traduction et la synthèse de texte.",
]
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=lowerCAmelCase , )
| 23 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : List[str] = RetriBertTokenizer
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 1 |
from math import factorial, pi
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 30 ) -> float:
if not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
raise ValueError("maclaurin_sin() requires either an int or float for theta" )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or accuracy <= 0:
raise ValueError("maclaurin_sin() requires a positive int for accuracy" )
UpperCAmelCase_ = float(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = theta // (2 * pi)
theta -= 2 * div * pi
return sum(
(-1) ** r * theta ** (2 * r + 1) / factorial(2 * r + 1 ) for r in range(__SCREAMING_SNAKE_CASE ) )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 30 ) -> float:
if not isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
raise ValueError("maclaurin_cos() requires either an int or float for theta" )
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) or accuracy <= 0:
raise ValueError("maclaurin_cos() requires a positive int for accuracy" )
UpperCAmelCase_ = float(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = theta // (2 * pi)
theta -= 2 * div * pi
return sum((-1) ** r * theta ** (2 * r) / factorial(2 * r ) for r in range(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
print(maclaurin_sin(10))
print(maclaurin_sin(-10))
print(maclaurin_sin(10, 15))
print(maclaurin_sin(-10, 15))
print(maclaurin_cos(5))
print(maclaurin_cos(-5))
print(maclaurin_cos(10, 15))
print(maclaurin_cos(-10, 15))
| 23 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : int = ['input_ids', 'attention_mask']
lowerCAmelCase_ : str = DistilBertTokenizer
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 1 |
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = MODEL_FOR_MASKED_LM_MAPPING
lowerCAmelCase_ : Any = TF_MODEL_FOR_MASKED_LM_MAPPING
def A__ ( self ):
super().tearDown()
# clean-up as much as possible GPU memory occupied by PyTorch
gc.collect()
if is_torch_available():
import torch
torch.cuda.empty_cache()
@require_tf
def A__ ( self ):
UpperCAmelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="tf" )
UpperCAmelCase_ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is grouped", "score": 2.1e-0_5, "token": 3_8015, "token_str": " grouped"},
{"sequence": "My name is accuser", "score": 2.1e-0_5, "token": 2_5506, "token_str": " accuser"},
] , )
UpperCAmelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is grouped",
"score": 2.1e-0_5,
"token": 3_8015,
"token_str": " grouped",
},
{
"sequence": "The largest city in France is accuser",
"score": 2.1e-0_5,
"token": 2_5506,
"token_str": " accuser",
},
] , )
UpperCAmelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3606, "token_str": " Clara"},
{"sequence": "My name is Patrick", "score": 2e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 1.9e-0_5, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def A__ ( self ):
UpperCAmelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , top_k=2 , framework="pt" )
UpperCAmelCase_ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Maul", "score": 2.2e-0_5, "token": 3_5676, "token_str": " Maul"},
{"sequence": "My name isELS", "score": 2.2e-0_5, "token": 1_6416, "token_str": "ELS"},
] , )
UpperCAmelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{
"sequence": "The largest city in France is Maul",
"score": 2.2e-0_5,
"token": 3_5676,
"token_str": " Maul",
},
{"sequence": "The largest city in France isELS", "score": 2.2e-0_5, "token": 1_6416, "token_str": "ELS"},
] , )
UpperCAmelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
{"sequence": "My name is Patrick", "score": 2.1e-0_5, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Te", "score": 2e-0_5, "token": 2941, "token_str": " Te"},
{"sequence": "My name is Clara", "score": 2e-0_5, "token": 1_3606, "token_str": " Clara"},
] , )
UpperCAmelCase_ = unmasker("My name is <mask> <mask>" , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase , decimals=6 ) , [
[
{
"score": 2.2e-0_5,
"token": 3_5676,
"token_str": " Maul",
"sequence": "<s>My name is Maul<mask></s>",
},
{"score": 2.2e-0_5, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
],
[
{
"score": 2.2e-0_5,
"token": 3_5676,
"token_str": " Maul",
"sequence": "<s>My name is<mask> Maul</s>",
},
{"score": 2.2e-0_5, "token": 1_6416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
],
] , )
@require_torch_gpu
def A__ ( self ):
UpperCAmelCase_ = pipeline("fill-mask" , model="hf-internal-testing/tiny-random-distilbert" , device=0 , framework="pt" )
# convert model to fp16
pipe.model.half()
UpperCAmelCase_ = pipe("Paris is the [MASK] of France." )
# We actually don't care about the result, we just want to make sure
# it works, meaning the float16 tensor got casted back to float32
# for postprocessing.
self.assertIsInstance(lowerCAmelCase , lowerCAmelCase )
@slow
@require_torch
def A__ ( self ):
UpperCAmelCase_ = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="pt" )
self.run_large_test(lowerCAmelCase )
@slow
@require_tf
def A__ ( self ):
UpperCAmelCase_ = pipeline(task="fill-mask" , model="distilroberta-base" , top_k=2 , framework="tf" )
self.run_large_test(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = unmasker("My name is <mask>" )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
{"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
{"sequence": "My name is Chris", "score": 0.007, "token": 1573, "token_str": " Chris"},
] , )
UpperCAmelCase_ = unmasker("The largest city in France is <mask>" )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
{
"sequence": "The largest city in France is Paris",
"score": 0.251,
"token": 2201,
"token_str": " Paris",
},
{
"sequence": "The largest city in France is Lyon",
"score": 0.214,
"token": 1_2790,
"token_str": " Lyon",
},
] , )
UpperCAmelCase_ = unmasker("My name is <mask>" , targets=[" Patrick", " Clara", " Teven"] , top_k=3 )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
{"sequence": "My name is Patrick", "score": 0.005, "token": 3499, "token_str": " Patrick"},
{"sequence": "My name is Clara", "score": 0.000, "token": 1_3606, "token_str": " Clara"},
{"sequence": "My name is Te", "score": 0.000, "token": 2941, "token_str": " Te"},
] , )
@require_torch
def A__ ( self ):
UpperCAmelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="pt" )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
self.run_pipeline_test(lowerCAmelCase , [] )
@require_tf
def A__ ( self ):
UpperCAmelCase_ = pipeline(task="fill-mask" , model="sshleifer/tiny-distilroberta-base" , framework="tf" )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
self.run_pipeline_test(lowerCAmelCase , [] )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if tokenizer is None or tokenizer.mask_token_id is None:
self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)" )
UpperCAmelCase_ = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
UpperCAmelCase_ = [
f'''This is another {tokenizer.mask_token} test''',
]
return fill_masker, examples
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = fill_masker.tokenizer
UpperCAmelCase_ = fill_masker.model
UpperCAmelCase_ = fill_masker(
f'''This is a {tokenizer.mask_token}''' , )
self.assertEqual(
lowerCAmelCase , [
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
] , )
UpperCAmelCase_ = fill_masker([f'''This is a {tokenizer.mask_token}'''] )
self.assertEqual(
lowerCAmelCase , [
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
] , )
UpperCAmelCase_ = fill_masker([f'''This is a {tokenizer.mask_token}''', f'''Another {tokenizer.mask_token} great test.'''] )
self.assertEqual(
lowerCAmelCase , [
[
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
],
[
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
],
] , )
with self.assertRaises(lowerCAmelCase ):
fill_masker([None] )
# No mask_token is not supported
with self.assertRaises(lowerCAmelCase ):
fill_masker("This is" )
self.run_test_top_k(lowerCAmelCase , lowerCAmelCase )
self.run_test_targets(lowerCAmelCase , lowerCAmelCase )
self.run_test_top_k_targets(lowerCAmelCase , lowerCAmelCase )
self.fill_mask_with_duplicate_targets_and_top_k(lowerCAmelCase , lowerCAmelCase )
self.fill_mask_with_multiple_masks(lowerCAmelCase , lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = tokenizer.get_vocab()
UpperCAmelCase_ = sorted(vocab.keys() )[:2]
# Pipeline argument
UpperCAmelCase_ = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase , targets=lowerCAmelCase )
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowerCAmelCase , [
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
] , )
UpperCAmelCase_ = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowerCAmelCase )
UpperCAmelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowerCAmelCase ) )
# Call argument
UpperCAmelCase_ = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase )
self.assertEqual(
lowerCAmelCase , [
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
] , )
UpperCAmelCase_ = {vocab[el] for el in targets}
self.assertEqual({el["token"] for el in outputs} , lowerCAmelCase )
UpperCAmelCase_ = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["token_str"] for el in outputs} , set(lowerCAmelCase ) )
# Score equivalence
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase )
UpperCAmelCase_ = [top_mask["token_str"] for top_mask in outputs]
UpperCAmelCase_ = [top_mask["score"] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCAmelCase ) == set(lowerCAmelCase ):
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=lowerCAmelCase )
UpperCAmelCase_ = [top_mask["score"] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(lowerCAmelCase ) , nested_simplify(lowerCAmelCase ) )
# Raises with invalid
with self.assertRaises(lowerCAmelCase ):
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[] )
# For some tokenizers, `""` is actually in the vocabulary and the expected error won't raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(lowerCAmelCase ):
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets=[""] )
with self.assertRaises(lowerCAmelCase ):
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , targets="" )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase , top_k=2 )
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' )
self.assertEqual(
lowerCAmelCase , [
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
] , )
UpperCAmelCase_ = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
lowerCAmelCase , [
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
] , )
self.assertEqual(nested_simplify(lowerCAmelCase ) , nested_simplify(lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = tokenizer.get_vocab()
UpperCAmelCase_ = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
# top_k=2, ntargets=3
UpperCAmelCase_ = sorted(vocab.keys() )[:3]
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=2 , targets=lowerCAmelCase )
# If we use the most probably targets, and filter differently, we should still
# have the same results
UpperCAmelCase_ = [el["token_str"] for el in sorted(lowerCAmelCase , key=lambda lowerCAmelCase : x["score"] , reverse=lowerCAmelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(lowerCAmelCase ).issubset(lowerCAmelCase ):
UpperCAmelCase_ = fill_masker(f'''This is a {tokenizer.mask_token}''' , top_k=3 , targets=lowerCAmelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(lowerCAmelCase ) , nested_simplify(lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.get_vocab()
# String duplicates + id duplicates
UpperCAmelCase_ = sorted(vocab.keys() )[:3]
UpperCAmelCase_ = [targets[0], targets[1], targets[0], targets[2], targets[1]]
UpperCAmelCase_ = fill_masker(f'''My name is {tokenizer.mask_token}''' , targets=lowerCAmelCase , top_k=10 )
# The target list contains duplicates, so we can't output more
# than them
self.assertEqual(len(lowerCAmelCase ) , 3 )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = FillMaskPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
UpperCAmelCase_ = fill_masker(
f'''This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}''' , top_k=2 )
self.assertEqual(
lowerCAmelCase , [
[
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
],
[
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
],
[
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
{"sequence": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase ), "token": ANY(lowerCAmelCase ), "token_str": ANY(lowerCAmelCase )},
],
] , )
| 23 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.array:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
UpperCAmelCase_ = "f32le"
UpperCAmelCase_ = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase_ = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
UpperCAmelCase_ = output_stream[0]
UpperCAmelCase_ = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "f32le" , ) -> Dict:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
if format_for_conversion == "s16le":
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCAmelCase_ = platform.system()
if system == "Linux":
UpperCAmelCase_ = "alsa"
UpperCAmelCase_ = "default"
elif system == "Darwin":
UpperCAmelCase_ = "avfoundation"
UpperCAmelCase_ = ":0"
elif system == "Windows":
UpperCAmelCase_ = "dshow"
UpperCAmelCase_ = "default"
UpperCAmelCase_ = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase_ = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "f32le" , ) -> int:
if stream_chunk_s is not None:
UpperCAmelCase_ = stream_chunk_s
else:
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
UpperCAmelCase_ = np.intaa
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = np.floataa
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCAmelCase_ = chunk_length_s / 6
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
UpperCAmelCase_ = [stride_length_s, stride_length_s]
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase_ = datetime.datetime.now()
UpperCAmelCase_ = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
UpperCAmelCase_ = np.frombuffer(item["raw"] , dtype=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
UpperCAmelCase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Dict:
UpperCAmelCase_ = B""
UpperCAmelCase_ , UpperCAmelCase_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCAmelCase_ = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
UpperCAmelCase_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
UpperCAmelCase_ = (_stride_left, stride_right)
UpperCAmelCase_ = {"raw": acc[:chunk_len], "stride": stride}
if stream:
UpperCAmelCase_ = False
yield item
UpperCAmelCase_ = stride_left
UpperCAmelCase_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
UpperCAmelCase_ = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
UpperCAmelCase_ = False
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ = 2**24 # 16Mo
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
UpperCAmelCase_ = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 23 | 1 |
from torch import nn
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]:
if act_fn in ["swish", "silu"]:
return nn.SiLU()
elif act_fn == "mish":
return nn.Mish()
elif act_fn == "gelu":
return nn.GELU()
else:
raise ValueError(f'''Unsupported activation function: {act_fn}''' )
| 23 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 768 , ):
super().__init__()
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.ones(1 , lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase = None , lowerCAmelCase = None , ):
UpperCAmelCase_ = nn.Parameter(self.mean.to(lowerCAmelCase ).to(lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(self.std.to(lowerCAmelCase ).to(lowerCAmelCase ) )
return self
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds * self.std) + self.mean
return embeds
| 23 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_roberta_prelayernorm": [
"ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP",
"RobertaPreLayerNormConfig",
"RobertaPreLayerNormOnnxConfig",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"RobertaPreLayerNormForCausalLM",
"RobertaPreLayerNormForMaskedLM",
"RobertaPreLayerNormForMultipleChoice",
"RobertaPreLayerNormForQuestionAnswering",
"RobertaPreLayerNormForSequenceClassification",
"RobertaPreLayerNormForTokenClassification",
"RobertaPreLayerNormModel",
"RobertaPreLayerNormPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFRobertaPreLayerNormForCausalLM",
"TFRobertaPreLayerNormForMaskedLM",
"TFRobertaPreLayerNormForMultipleChoice",
"TFRobertaPreLayerNormForQuestionAnswering",
"TFRobertaPreLayerNormForSequenceClassification",
"TFRobertaPreLayerNormForTokenClassification",
"TFRobertaPreLayerNormMainLayer",
"TFRobertaPreLayerNormModel",
"TFRobertaPreLayerNormPreTrainedModel",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FlaxRobertaPreLayerNormForCausalLM",
"FlaxRobertaPreLayerNormForMaskedLM",
"FlaxRobertaPreLayerNormForMultipleChoice",
"FlaxRobertaPreLayerNormForQuestionAnswering",
"FlaxRobertaPreLayerNormForSequenceClassification",
"FlaxRobertaPreLayerNormForTokenClassification",
"FlaxRobertaPreLayerNormModel",
"FlaxRobertaPreLayerNormPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
if prompt is not None:
UpperCAmelCase_ = prompt
if generate_kwargs is not None:
UpperCAmelCase_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCAmelCase_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
UpperCAmelCase_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowerCAmelCase , **lowerCAmelCase ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = load_image(lowerCAmelCase )
if prompt is not None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
"Note also that one single text can be provided for conditional image to text generation." )
UpperCAmelCase_ = self.model.config.model_type
if model_type == "git":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(text=lowerCAmelCase , add_special_tokens=lowerCAmelCase ).input_ids
UpperCAmelCase_ = [self.tokenizer.cls_token_id] + input_ids
UpperCAmelCase_ = torch.tensor(lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , header_text=lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCAmelCase_ = None
return model_inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , lowerCAmelCase )
and all(x is None for x in model_inputs["input_ids"] )
):
UpperCAmelCase_ = None
if generate_kwargs is None:
UpperCAmelCase_ = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCAmelCase_ = model_inputs.pop(self.model.main_input_name )
UpperCAmelCase_ = self.model.generate(lowerCAmelCase , **lowerCAmelCase , **lowerCAmelCase )
return model_outputs
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
for output_ids in model_outputs:
UpperCAmelCase_ = {
"generated_text": self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , )
}
records.append(lowerCAmelCase )
return records
| 23 | 1 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"kssteven/ibert-roberta-base": "https://huggingface.co/kssteven/ibert-roberta-base/resolve/main/config.json",
"kssteven/ibert-roberta-large": "https://huggingface.co/kssteven/ibert-roberta-large/resolve/main/config.json",
"kssteven/ibert-roberta-large-mnli": (
"https://huggingface.co/kssteven/ibert-roberta-large-mnli/resolve/main/config.json"
),
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = 'ibert'
def __init__( self , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=False , lowerCAmelCase="none" , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = quant_mode
UpperCAmelCase_ = force_dequant
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 23 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = TextToVideoSDPipeline
lowerCAmelCase_ : Dict = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase_ : Optional[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = TextToVideoSDPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase_ = "np"
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCAmelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A__ ( self ):
pass
def A__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
return int((input_a, input_a).count(0 ) != 0 )
def snake_case__ ( ) -> None:
assert nand_gate(0 , 0 ) == 1
assert nand_gate(0 , 1 ) == 1
assert nand_gate(1 , 0 ) == 1
assert nand_gate(1 , 1 ) == 0
if __name__ == "__main__":
print(nand_gate(0, 0))
print(nand_gate(0, 1))
print(nand_gate(1, 0))
print(nand_gate(1, 1))
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = [1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 0, 0, 0
UpperCAmelCase_ = ugly_nums[ia] * 2
UpperCAmelCase_ = ugly_nums[ia] * 3
UpperCAmelCase_ = ugly_nums[ia] * 5
for _ in range(1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
ugly_nums.append(__SCREAMING_SNAKE_CASE )
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 23 | 1 |
import gc
import unittest
import numpy as np
import torch
from diffusers import (
AudioDiffusionPipeline,
AutoencoderKL,
DDIMScheduler,
DDPMScheduler,
DiffusionPipeline,
Mel,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDModel(
sample_size=(32, 64) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return model
@property
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , cross_attention_dim=10 , )
return model
@property
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
sample_size=(128, 64) , in_channels=1 , out_channels=1 , latent_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("DownEncoderBlock2D", "DownEncoderBlock2D") , up_block_types=("UpDecoderBlock2D", "UpDecoderBlock2D") , )
UpperCAmelCase_ = UNetaDModel(
sample_size=(64, 32) , in_channels=1 , out_channels=1 , layers_per_block=2 , block_out_channels=(128, 128) , down_block_types=("AttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "AttnUpBlock2D") , )
return vqvae, unet
@slow
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = Mel(
x_res=self.dummy_unet.config.sample_size[1] , y_res=self.dummy_unet.config.sample_size[0] , )
UpperCAmelCase_ = DDPMScheduler()
UpperCAmelCase_ = AudioDiffusionPipeline(vqvae=lowerCAmelCase , unet=self.dummy_unet , mel=lowerCAmelCase , scheduler=lowerCAmelCase )
UpperCAmelCase_ = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ = pipe(generator=lowerCAmelCase , steps=4 )
UpperCAmelCase_ = output.audios[0]
UpperCAmelCase_ = output.images[0]
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ = pipe(generator=lowerCAmelCase , steps=4 , return_dict=lowerCAmelCase )
UpperCAmelCase_ = output[0][0]
assert audio.shape == (1, (self.dummy_unet.config.sample_size[1] - 1) * mel.hop_length)
assert (
image.height == self.dummy_unet.config.sample_size[0]
and image.width == self.dummy_unet.config.sample_size[1]
)
UpperCAmelCase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ = np.frombuffer(image_from_tuple.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ = np.array([69, 255, 255, 255, 0, 0, 77, 181, 12, 127] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_ = Mel(
x_res=self.dummy_vqvae_and_unet[0].config.sample_size[1] , y_res=self.dummy_vqvae_and_unet[0].config.sample_size[0] , )
UpperCAmelCase_ = DDIMScheduler()
UpperCAmelCase_ = self.dummy_vqvae_and_unet
UpperCAmelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=dummy_vqvae_and_unet[1] , mel=lowerCAmelCase , scheduler=lowerCAmelCase )
UpperCAmelCase_ = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
np.random.seed(0 )
UpperCAmelCase_ = np.random.uniform(-1 , 1 , ((dummy_vqvae_and_unet[0].config.sample_size[1] - 1) * mel.hop_length,) )
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ = pipe(raw_audio=lowerCAmelCase , generator=lowerCAmelCase , start_step=5 , steps=10 )
UpperCAmelCase_ = output.images[0]
assert (
image.height == self.dummy_vqvae_and_unet[0].config.sample_size[0]
and image.width == self.dummy_vqvae_and_unet[0].config.sample_size[1]
)
UpperCAmelCase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ = np.array([120, 117, 110, 109, 138, 167, 138, 148, 132, 121] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
UpperCAmelCase_ = self.dummy_unet_condition
UpperCAmelCase_ = AudioDiffusionPipeline(
vqvae=self.dummy_vqvae_and_unet[0] , unet=lowerCAmelCase , mel=lowerCAmelCase , scheduler=lowerCAmelCase )
UpperCAmelCase_ = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
np.random.seed(0 )
UpperCAmelCase_ = torch.rand((1, 1, 10) )
UpperCAmelCase_ = pipe(generator=lowerCAmelCase , encoding=lowerCAmelCase )
UpperCAmelCase_ = output.images[0]
UpperCAmelCase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ = np.array([107, 103, 120, 127, 142, 122, 113, 122, 97, 111] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
UpperCAmelCase_ = torch_device
UpperCAmelCase_ = DiffusionPipeline.from_pretrained("teticio/audio-diffusion-ddim-256" )
UpperCAmelCase_ = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(42 )
UpperCAmelCase_ = pipe(generator=lowerCAmelCase )
UpperCAmelCase_ = output.audios[0]
UpperCAmelCase_ = output.images[0]
assert audio.shape == (1, (pipe.unet.config.sample_size[1] - 1) * pipe.mel.hop_length)
assert image.height == pipe.unet.config.sample_size[0] and image.width == pipe.unet.config.sample_size[1]
UpperCAmelCase_ = np.frombuffer(image.tobytes() , dtype="uint8" )[:10]
UpperCAmelCase_ = np.array([151, 167, 154, 144, 122, 134, 121, 105, 70, 26] )
assert np.abs(image_slice.flatten() - expected_slice ).max() == 0
| 23 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=224 , lowerCAmelCase=1000 , lowerCAmelCase=[3, 3, 6, 4] , lowerCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = layer_depths
UpperCAmelCase_ = embed_dims
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1e-5 , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ):
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : int = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Optional[Any] = False
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
def _config_zero_init(lowerCAmelCase ):
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1e-1_0 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
UpperCAmelCase_ = _config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 23 | 1 |
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
SCREAMING_SNAKE_CASE = logging.get_logger(__name__) # pylint: disable=invalid-name
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase ):
super().__init__()
self.register_modules(unet=lowerCAmelCase , scheduler=lowerCAmelCase )
@torch.no_grad()
def __call__( self , lowerCAmelCase = 1 , lowerCAmelCase = 100 , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
if audio_length_in_s is None:
UpperCAmelCase_ = self.unet.config.sample_size / self.unet.config.sample_rate
UpperCAmelCase_ = audio_length_in_s * self.unet.config.sample_rate
UpperCAmelCase_ = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f'''{audio_length_in_s} is too small. Make sure it\'s bigger or equal to'''
f''' {3 * down_scale_factor / self.unet.config.sample_rate}.''' )
UpperCAmelCase_ = int(lowerCAmelCase )
if sample_size % down_scale_factor != 0:
UpperCAmelCase_ = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f'''{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled'''
f''' by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising'''
" process." )
UpperCAmelCase_ = int(lowerCAmelCase )
UpperCAmelCase_ = next(iter(self.unet.parameters() ) ).dtype
UpperCAmelCase_ = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != batch_size:
raise ValueError(
f'''You have passed a list of generators of length {len(lowerCAmelCase )}, but requested an effective batch'''
f''' size of {batch_size}. Make sure the batch size matches the length of the generators.''' )
UpperCAmelCase_ = randn_tensor(lowerCAmelCase , generator=lowerCAmelCase , device=self.device , dtype=lowerCAmelCase )
# set step values
self.scheduler.set_timesteps(lowerCAmelCase , device=audio.device )
UpperCAmelCase_ = self.scheduler.timesteps.to(lowerCAmelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
UpperCAmelCase_ = self.unet(lowerCAmelCase , lowerCAmelCase ).sample
# 2. compute previous image: x_t -> t_t-1
UpperCAmelCase_ = self.scheduler.step(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ).prev_sample
UpperCAmelCase_ = audio.clamp(-1 , 1 ).float().cpu().numpy()
UpperCAmelCase_ = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
return AudioPipelineOutput(audios=lowerCAmelCase )
| 23 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
import string
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> None:
for key in range(len(string.ascii_uppercase ) ):
UpperCAmelCase_ = ""
for symbol in message:
if symbol in string.ascii_uppercase:
UpperCAmelCase_ = string.ascii_uppercase.find(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = num - key
if num < 0:
UpperCAmelCase_ = num + len(string.ascii_uppercase )
UpperCAmelCase_ = translated + string.ascii_uppercase[num]
else:
UpperCAmelCase_ = translated + symbol
print(f'''Decryption using Key #{key}: {translated}''' )
def snake_case__ ( ) -> None:
UpperCAmelCase_ = input("Encrypted message: " )
UpperCAmelCase_ = message.upper()
decrypt(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 23 |
import math
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]:
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) ) # Size of every segment
UpperCAmelCase_ = [True] * (end + 1)
UpperCAmelCase_ = []
while start <= end:
if temp[start] is True:
in_prime.append(__SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
start += 1
prime += in_prime
UpperCAmelCase_ = end + 1
UpperCAmelCase_ = min(2 * end , __SCREAMING_SNAKE_CASE )
while low <= n:
UpperCAmelCase_ = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase_ = math.floor(low / each ) * each
if t < low:
t += each
for j in range(__SCREAMING_SNAKE_CASE , high + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase_ = high + 1
UpperCAmelCase_ = min(high + end , __SCREAMING_SNAKE_CASE )
return prime
print(sieve(10**6))
| 23 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
SCREAMING_SNAKE_CASE = {
"configuration_biogpt": ["BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP", "BioGptConfig"],
"tokenization_biogpt": ["BioGptTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST",
"BioGptForCausalLM",
"BioGptForTokenClassification",
"BioGptForSequenceClassification",
"BioGptModel",
"BioGptPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : torch.FloatTensor
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ):
UpperCAmelCase_ = {}
def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return processors
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
self.set_attn_processor(AttnProcessor() )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 23 | 1 |
from __future__ import annotations
SCREAMING_SNAKE_CASE = list[list[int]]
# assigning initial values to the grid
SCREAMING_SNAKE_CASE = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
SCREAMING_SNAKE_CASE = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> bool:
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> tuple[int, int] | None:
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Matrix | None:
if location := find_empty_location(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ , UpperCAmelCase_ = location
else:
# If the location is ``None``, then the grid is solved.
return grid
for digit in range(1 , 10 ):
if is_safe(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = digit
if sudoku(__SCREAMING_SNAKE_CASE ) is not None:
return grid
UpperCAmelCase_ = 0
return None
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> None:
for row in grid:
for cell in row:
print(__SCREAMING_SNAKE_CASE , end=" " )
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print("\nExample grid:\n" + "=" * 20)
print_solution(example_grid)
print("\nExample grid solution:")
SCREAMING_SNAKE_CASE = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print("Cannot find a solution.")
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
import argparse
import glob
import logging
import os
import sys
import time
from collections import defaultdict
from pathlib import Path
from typing import Dict, List, Tuple
import numpy as np
import pytorch_lightning as pl
import torch
from callbacks import SeqaSeqLoggingCallback, get_checkpoint_callback, get_early_stopping_callback
from torch import nn
from torch.utils.data import DataLoader
from transformers import MBartTokenizer, TaForConditionalGeneration
from transformers.models.bart.modeling_bart import shift_tokens_right
from utils import (
ROUGE_KEYS,
LegacySeqaSeqDataset,
SeqaSeqDataset,
assert_all_frozen,
calculate_bleu,
calculate_rouge,
check_output_dir,
flatten_list,
freeze_embeds,
freeze_params,
get_git_info,
label_smoothed_nll_loss,
lmap,
pickle_save,
save_git_info,
save_json,
use_task_specific_params,
)
# need the parent dir module
sys.path.insert(2, str(Path(__file__).resolve().parents[1]))
from lightning_base import BaseTransformer, add_generic_args, generic_train # noqa
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = 'summarization'
lowerCAmelCase_ : Any = ['loss']
lowerCAmelCase_ : List[str] = ROUGE_KEYS
lowerCAmelCase_ : Dict = 'rouge2'
def __init__( self , lowerCAmelCase , **lowerCAmelCase ):
if hparams.sortish_sampler and hparams.gpus > 1:
UpperCAmelCase_ = False
elif hparams.max_tokens_per_batch is not None:
if hparams.gpus > 1:
raise NotImplementedError("Dynamic Batch size does not work for multi-gpu training" )
if hparams.sortish_sampler:
raise ValueError("--sortish_sampler and --max_tokens_per_batch may not be used simultaneously" )
super().__init__(lowerCAmelCase , num_labels=lowerCAmelCase , mode=self.mode , **lowerCAmelCase )
use_task_specific_params(self.model , "summarization" )
save_git_info(self.hparams.output_dir )
UpperCAmelCase_ = Path(self.output_dir ) / "metrics.json"
UpperCAmelCase_ = Path(self.output_dir ) / "hparams.pkl"
pickle_save(self.hparams , self.hparams_save_path )
UpperCAmelCase_ = 0
UpperCAmelCase_ = defaultdict(lowerCAmelCase )
UpperCAmelCase_ = self.config.model_type
UpperCAmelCase_ = self.config.tgt_vocab_size if self.model_type == "fsmt" else self.config.vocab_size
UpperCAmelCase_ = {
"data_dir": self.hparams.data_dir,
"max_source_length": self.hparams.max_source_length,
"prefix": self.model.config.prefix or "",
}
UpperCAmelCase_ = {
"train": self.hparams.n_train,
"val": self.hparams.n_val,
"test": self.hparams.n_test,
}
UpperCAmelCase_ = {k: v if v >= 0 else None for k, v in n_observations_per_split.items()}
UpperCAmelCase_ = {
"train": self.hparams.max_target_length,
"val": self.hparams.val_max_target_length,
"test": self.hparams.test_max_target_length,
}
assert self.target_lens["train"] <= self.target_lens["val"], f'''target_lens: {self.target_lens}'''
assert self.target_lens["train"] <= self.target_lens["test"], f'''target_lens: {self.target_lens}'''
if self.hparams.freeze_embeds:
freeze_embeds(self.model )
if self.hparams.freeze_encoder:
freeze_params(self.model.get_encoder() )
assert_all_frozen(self.model.get_encoder() )
UpperCAmelCase_ = get_git_info()["repo_sha"]
UpperCAmelCase_ = hparams.num_workers
UpperCAmelCase_ = None # default to config
if self.model.config.decoder_start_token_id is None and isinstance(self.tokenizer , lowerCAmelCase ):
UpperCAmelCase_ = self.tokenizer.lang_code_to_id[hparams.tgt_lang]
UpperCAmelCase_ = self.decoder_start_token_id
UpperCAmelCase_ = (
SeqaSeqDataset if hasattr(self.tokenizer , "prepare_seq2seq_batch" ) else LegacySeqaSeqDataset
)
UpperCAmelCase_ = False
UpperCAmelCase_ = self.model.config.num_beams if self.hparams.eval_beams is None else self.hparams.eval_beams
if self.hparams.eval_max_gen_length is not None:
UpperCAmelCase_ = self.hparams.eval_max_gen_length
else:
UpperCAmelCase_ = self.model.config.max_length
UpperCAmelCase_ = self.default_val_metric if self.hparams.val_metric is None else self.hparams.val_metric
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = {
k: self.tokenizer.batch_decode(v.tolist() ) if "mask" not in k else v.shape for k, v in batch.items()
}
save_json(lowerCAmelCase , Path(self.output_dir ) / "text_batch.json" )
save_json({k: v.tolist() for k, v in batch.items()} , Path(self.output_dir ) / "tok_batch.json" )
UpperCAmelCase_ = True
return readable_batch
def A__ ( self , lowerCAmelCase , **lowerCAmelCase ):
return self.model(lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = self.tokenizer.batch_decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , clean_up_tokenization_spaces=lowerCAmelCase )
return lmap(str.strip , lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = self.tokenizer.pad_token_id
UpperCAmelCase_ , UpperCAmelCase_ = batch["input_ids"], batch["attention_mask"]
UpperCAmelCase_ = batch["labels"]
if isinstance(self.model , lowerCAmelCase ):
UpperCAmelCase_ = self.model._shift_right(lowerCAmelCase )
else:
UpperCAmelCase_ = shift_tokens_right(lowerCAmelCase , lowerCAmelCase )
if not self.already_saved_batch: # This would be slightly better if it only happened on rank zero
UpperCAmelCase_ = decoder_input_ids
self.save_readable_batch(lowerCAmelCase )
UpperCAmelCase_ = self(lowerCAmelCase , attention_mask=lowerCAmelCase , decoder_input_ids=lowerCAmelCase , use_cache=lowerCAmelCase )
UpperCAmelCase_ = outputs["logits"]
if self.hparams.label_smoothing == 0:
# Same behavior as modeling_bart.py, besides ignoring pad_token_id
UpperCAmelCase_ = nn.CrossEntropyLoss(ignore_index=lowerCAmelCase )
assert lm_logits.shape[-1] == self.vocab_size
UpperCAmelCase_ = ce_loss_fct(lm_logits.view(-1 , lm_logits.shape[-1] ) , tgt_ids.view(-1 ) )
else:
UpperCAmelCase_ = nn.functional.log_softmax(lowerCAmelCase , dim=-1 )
UpperCAmelCase_ , UpperCAmelCase_ = label_smoothed_nll_loss(
lowerCAmelCase , lowerCAmelCase , self.hparams.label_smoothing , ignore_index=lowerCAmelCase )
return (loss,)
@property
def A__ ( self ):
return self.tokenizer.pad_token_id
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self._step(lowerCAmelCase )
UpperCAmelCase_ = dict(zip(self.loss_names , lowerCAmelCase ) )
# tokens per batch
UpperCAmelCase_ = batch["input_ids"].ne(self.pad ).sum() + batch["labels"].ne(self.pad ).sum()
UpperCAmelCase_ = batch["input_ids"].shape[0]
UpperCAmelCase_ = batch["input_ids"].eq(self.pad ).sum()
UpperCAmelCase_ = batch["input_ids"].eq(self.pad ).float().mean()
# TODO(SS): make a wandb summary metric for this
return {"loss": loss_tensors[0], "log": logs}
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return self._generative_step(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase="val" ):
self.step_count += 1
UpperCAmelCase_ = {k: torch.stack([x[k] for x in outputs] ).mean() for k in self.loss_names}
UpperCAmelCase_ = losses["loss"]
UpperCAmelCase_ = {
k: np.array([x[k] for x in outputs] ).mean() for k in self.metric_names + ["gen_time", "gen_len"]
}
UpperCAmelCase_ = (
generative_metrics[self.val_metric] if self.val_metric in generative_metrics else losses[self.val_metric]
)
UpperCAmelCase_ = torch.tensor(lowerCAmelCase ).type_as(lowerCAmelCase )
generative_metrics.update({k: v.item() for k, v in losses.items()} )
losses.update(lowerCAmelCase )
UpperCAmelCase_ = {f'''{prefix}_avg_{k}''': x for k, x in losses.items()}
UpperCAmelCase_ = self.step_count
self.metrics[prefix].append(lowerCAmelCase ) # callback writes this to self.metrics_save_path
UpperCAmelCase_ = flatten_list([x["preds"] for x in outputs] )
return {
"log": all_metrics,
"preds": preds,
f'''{prefix}_loss''': loss,
f'''{prefix}_{self.val_metric}''': metric_tensor,
}
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return calculate_rouge(lowerCAmelCase , lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = time.time()
# parser.add_argument('--eval_max_gen_length', type=int, default=None, help='never generate more than n tokens')
UpperCAmelCase_ = self.model.generate(
batch["input_ids"] , attention_mask=batch["attention_mask"] , use_cache=lowerCAmelCase , decoder_start_token_id=self.decoder_start_token_id , num_beams=self.eval_beams , max_length=self.eval_max_length , )
UpperCAmelCase_ = (time.time() - ta) / batch["input_ids"].shape[0]
UpperCAmelCase_ = self.ids_to_clean_text(lowerCAmelCase )
UpperCAmelCase_ = self.ids_to_clean_text(batch["labels"] )
UpperCAmelCase_ = self._step(lowerCAmelCase )
UpperCAmelCase_ = dict(zip(self.loss_names , lowerCAmelCase ) )
UpperCAmelCase_ = self.calc_generative_metrics(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = np.mean(lmap(lowerCAmelCase , lowerCAmelCase ) )
base_metrics.update(gen_time=lowerCAmelCase , gen_len=lowerCAmelCase , preds=lowerCAmelCase , target=lowerCAmelCase , **lowerCAmelCase )
return base_metrics
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return self._generative_step(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
return self.validation_epoch_end(lowerCAmelCase , prefix="test" )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = self.n_obs[type_path]
UpperCAmelCase_ = self.target_lens[type_path]
UpperCAmelCase_ = self.dataset_class(
self.tokenizer , type_path=lowerCAmelCase , n_obs=lowerCAmelCase , max_target_length=lowerCAmelCase , **self.dataset_kwargs , )
return dataset
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = False ):
UpperCAmelCase_ = self.get_dataset(lowerCAmelCase )
if self.hparams.sortish_sampler and type_path != "test" and type_path != "val":
UpperCAmelCase_ = dataset.make_sortish_sampler(lowerCAmelCase , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase , num_workers=self.num_workers , sampler=lowerCAmelCase , )
elif self.hparams.max_tokens_per_batch is not None and type_path != "test" and type_path != "val":
UpperCAmelCase_ = dataset.make_dynamic_sampler(
self.hparams.max_tokens_per_batch , distributed=self.hparams.gpus > 1 )
return DataLoader(
lowerCAmelCase , batch_sampler=lowerCAmelCase , collate_fn=dataset.collate_fn , num_workers=self.num_workers , )
else:
return DataLoader(
lowerCAmelCase , batch_size=lowerCAmelCase , collate_fn=dataset.collate_fn , shuffle=lowerCAmelCase , num_workers=self.num_workers , sampler=lowerCAmelCase , )
def A__ ( self ):
UpperCAmelCase_ = self.get_dataloader("train" , batch_size=self.hparams.train_batch_size , shuffle=lowerCAmelCase )
return dataloader
def A__ ( self ):
return self.get_dataloader("val" , batch_size=self.hparams.eval_batch_size )
def A__ ( self ):
return self.get_dataloader("test" , batch_size=self.hparams.eval_batch_size )
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
BaseTransformer.add_model_specific_args(lowerCAmelCase , lowerCAmelCase )
add_generic_args(lowerCAmelCase , lowerCAmelCase )
parser.add_argument(
"--max_source_length" , default=1024 , type=lowerCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--max_target_length" , default=56 , type=lowerCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--val_max_target_length" , default=142 , type=lowerCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--test_max_target_length" , default=142 , type=lowerCAmelCase , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument("--freeze_encoder" , action="store_true" )
parser.add_argument("--freeze_embeds" , action="store_true" )
parser.add_argument("--sortish_sampler" , action="store_true" , default=lowerCAmelCase )
parser.add_argument("--overwrite_output_dir" , action="store_true" , default=lowerCAmelCase )
parser.add_argument("--max_tokens_per_batch" , type=lowerCAmelCase , default=lowerCAmelCase )
parser.add_argument("--logger_name" , type=lowerCAmelCase , choices=["default", "wandb", "wandb_shared"] , default="default" )
parser.add_argument("--n_train" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_val" , type=lowerCAmelCase , default=500 , required=lowerCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--n_test" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help="# examples. -1 means use all." )
parser.add_argument(
"--task" , type=lowerCAmelCase , default="summarization" , required=lowerCAmelCase , help="# examples. -1 means use all." )
parser.add_argument("--label_smoothing" , type=lowerCAmelCase , default=0.0 , required=lowerCAmelCase )
parser.add_argument("--src_lang" , type=lowerCAmelCase , default="" , required=lowerCAmelCase )
parser.add_argument("--tgt_lang" , type=lowerCAmelCase , default="" , required=lowerCAmelCase )
parser.add_argument("--eval_beams" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase )
parser.add_argument(
"--val_metric" , type=lowerCAmelCase , default=lowerCAmelCase , required=lowerCAmelCase , choices=["bleu", "rouge2", "loss", None] )
parser.add_argument("--eval_max_gen_length" , type=lowerCAmelCase , default=lowerCAmelCase , help="never generate more than n tokens" )
parser.add_argument("--save_top_k" , type=lowerCAmelCase , default=1 , required=lowerCAmelCase , help="How many checkpoints to save" )
parser.add_argument(
"--early_stopping_patience" , type=lowerCAmelCase , default=-1 , required=lowerCAmelCase , help=(
"-1 means never early stop. early_stopping_patience is measured in validation checks, not epochs. So"
" val_check_interval will effect it."
) , )
return parser
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = 'translation'
lowerCAmelCase_ : Optional[Any] = ['loss']
lowerCAmelCase_ : Optional[Any] = ['bleu']
lowerCAmelCase_ : List[str] = 'bleu'
def __init__( self , lowerCAmelCase , **lowerCAmelCase ):
super().__init__(lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = hparams.src_lang
UpperCAmelCase_ = hparams.tgt_lang
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return calculate_bleu(lowerCAmelCase , lowerCAmelCase )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> SummarizationModule:
Path(args.output_dir ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
check_output_dir(__SCREAMING_SNAKE_CASE , expected_items=3 )
if model is None:
if "summarization" in args.task:
UpperCAmelCase_ = SummarizationModule(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = TranslationModule(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = Path(args.data_dir ).name
if (
args.logger_name == "default"
or args.fast_dev_run
or str(args.output_dir ).startswith("/tmp" )
or str(args.output_dir ).startswith("/var" )
):
UpperCAmelCase_ = True # don't pollute wandb logs unnecessarily
elif args.logger_name == "wandb":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ = os.environ.get("WANDB_PROJECT" , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = WandbLogger(name=model.output_dir.name , project=__SCREAMING_SNAKE_CASE )
elif args.logger_name == "wandb_shared":
from pytorch_lightning.loggers import WandbLogger
UpperCAmelCase_ = WandbLogger(name=model.output_dir.name , project=f'''hf_{dataset}''' )
if args.early_stopping_patience >= 0:
UpperCAmelCase_ = get_early_stopping_callback(model.val_metric , args.early_stopping_patience )
else:
UpperCAmelCase_ = False
UpperCAmelCase_ = args.val_metric == "loss"
UpperCAmelCase_ = generic_train(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , logging_callback=SeqaSeqLoggingCallback() , checkpoint_callback=get_checkpoint_callback(
args.output_dir , model.val_metric , args.save_top_k , __SCREAMING_SNAKE_CASE ) , early_stopping_callback=__SCREAMING_SNAKE_CASE , logger=__SCREAMING_SNAKE_CASE , )
pickle_save(model.hparams , model.output_dir / "hparams.pkl" )
if not args.do_predict:
return model
UpperCAmelCase_ = ""
UpperCAmelCase_ = sorted(glob.glob(os.path.join(args.output_dir , "*.ckpt" ) , recursive=__SCREAMING_SNAKE_CASE ) )
if checkpoints:
UpperCAmelCase_ = checkpoints[-1]
UpperCAmelCase_ = checkpoints[-1]
trainer.logger.log_hyperparams(model.hparams )
# test() without a model tests using the best checkpoint automatically
trainer.test()
return model
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
SCREAMING_SNAKE_CASE = pl.Trainer.add_argparse_args(parser)
SCREAMING_SNAKE_CASE = SummarizationModule.add_model_specific_args(parser, os.getcwd())
SCREAMING_SNAKE_CASE = parser.parse_args()
main(args)
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = 'xlm-roberta'
def __init__( self , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 23 | 1 |
SCREAMING_SNAKE_CASE = tuple[float, float, float]
SCREAMING_SNAKE_CASE = tuple[float, float, float]
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Vectorad:
UpperCAmelCase_ = end_pointa[0] - end_pointa[0]
UpperCAmelCase_ = end_pointa[1] - end_pointa[1]
UpperCAmelCase_ = end_pointa[2] - end_pointa[2]
return (x, y, z)
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Vectorad:
UpperCAmelCase_ = ab[1] * ac[2] - ab[2] * ac[1] # *i
UpperCAmelCase_ = (ab[0] * ac[2] - ab[2] * ac[0]) * -1 # *j
UpperCAmelCase_ = ab[0] * ac[1] - ab[1] * ac[0] # *k
return (x, y, z)
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> bool:
return tuple(round(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) for x in vector ) == (0, 0, 0)
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 10 ) -> bool:
UpperCAmelCase_ = create_vector(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = create_vector(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return is_zero_vector(get_ad_vectors_cross(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , __SCREAMING_SNAKE_CASE )
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCAmelCase_ = "-" if number.startswith("-" ) else ""
UpperCAmelCase_ = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 23 | 1 |
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
SCREAMING_SNAKE_CASE = "platform"
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : List[str] = PegasusConfig
lowerCAmelCase_ : str = {}
lowerCAmelCase_ : str = 'gelu'
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=20 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=0 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = pad_token_id
UpperCAmelCase_ = bos_token_id
def A__ ( self ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
UpperCAmelCase_ = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ = np.concatenate([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ = prepare_pegasus_inputs_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, inputs_dict
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = 20
UpperCAmelCase_ = model_class_name(lowerCAmelCase )
UpperCAmelCase_ = model.encode(inputs_dict["input_ids"] )
UpperCAmelCase_ , UpperCAmelCase_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="i4" )
UpperCAmelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=lowerCAmelCase , )
UpperCAmelCase_ = model.decode(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = 20
UpperCAmelCase_ = model_class_name(lowerCAmelCase )
UpperCAmelCase_ = model.encode(inputs_dict["input_ids"] )
UpperCAmelCase_ , UpperCAmelCase_ = (
inputs_dict["decoder_input_ids"],
inputs_dict["decoder_attention_mask"],
)
UpperCAmelCase_ = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
UpperCAmelCase_ = model.init_cache(decoder_input_ids.shape[0] , lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, :-1] , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
UpperCAmelCase_ = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ = model.decode(
decoder_input_ids[:, -1:] , lowerCAmelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=lowerCAmelCase , decoder_position_ids=lowerCAmelCase , )
UpperCAmelCase_ = model.decode(lowerCAmelCase , lowerCAmelCase , decoder_attention_mask=lowerCAmelCase )
UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ) -> List[Any]:
if attention_mask is None:
UpperCAmelCase_ = np.not_equal(__SCREAMING_SNAKE_CASE , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
lowerCAmelCase_ : List[str] = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
lowerCAmelCase_ : List[Any] = True
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : str = False
def A__ ( self ):
UpperCAmelCase_ = FlaxPegasusModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase )
def A__ ( self ):
self.config_tester.run_common_tests()
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = model_class(lowerCAmelCase )
@jax.jit
def encode_jitted(lowerCAmelCase , lowerCAmelCase=None , **lowerCAmelCase ):
return model.encode(input_ids=lowerCAmelCase , attention_mask=lowerCAmelCase )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ = encode_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ = encode_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.encode(inputs_dict["input_ids"] , inputs_dict["attention_mask"] )
UpperCAmelCase_ = {
"decoder_input_ids": inputs_dict["decoder_input_ids"],
"decoder_attention_mask": inputs_dict["decoder_attention_mask"],
"encoder_outputs": encoder_outputs,
}
@jax.jit
def decode_jitted(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
return model.decode(
decoder_input_ids=lowerCAmelCase , decoder_attention_mask=lowerCAmelCase , encoder_outputs=lowerCAmelCase , )
with self.subTest("JIT Enabled" ):
UpperCAmelCase_ = decode_jitted(**lowerCAmelCase ).to_tuple()
with self.subTest("JIT Disabled" ):
with jax.disable_jit():
UpperCAmelCase_ = decode_jitted(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) )
for jitted_output, output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def A__ ( self ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("google/pegasus-large" , from_pt=lowerCAmelCase )
UpperCAmelCase_ = np.ones((1, 1) )
UpperCAmelCase_ = model(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@slow
def A__ ( self ):
UpperCAmelCase_ = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum" )
UpperCAmelCase_ = PegasusTokenizer.from_pretrained("google/pegasus-xsum" )
UpperCAmelCase_ = [
" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
]
UpperCAmelCase_ = [
"California's largest electricity provider has turned off power to hundreds of thousands of customers.",
"Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
]
UpperCAmelCase_ = tokenizer(lowerCAmelCase , return_tensors="np" , truncation=lowerCAmelCase , max_length=512 , padding=lowerCAmelCase )
UpperCAmelCase_ = model.generate(**lowerCAmelCase , num_beams=2 ).sequences
UpperCAmelCase_ = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
assert tgt_text == decoded
| 23 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase ).to(lowerCAmelCase )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = model(input_ids.to(lowerCAmelCase ) , labels=labels.to(lowerCAmelCase ) ).loss
UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 23 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
UpperCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 23 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@slow
@require_torch
def A__ ( self ):
UpperCAmelCase_ = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = bertabert.config.encoder.vocab_size
UpperCAmelCase_ = tokenizer.sep_token_id
UpperCAmelCase_ = tokenizer.cls_token_id
UpperCAmelCase_ = 128
UpperCAmelCase_ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
UpperCAmelCase_ = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
UpperCAmelCase_ = train_dataset.select(range(32 ) )
UpperCAmelCase_ = val_dataset.select(range(16 ) )
UpperCAmelCase_ = 4
def _map_to_encoder_decoder_inputs(lowerCAmelCase ):
# Tokenizer will automatically set [BOS] <text> [EOS]
UpperCAmelCase_ = tokenizer(batch["article"] , padding="max_length" , truncation=lowerCAmelCase , max_length=512 )
UpperCAmelCase_ = tokenizer(batch["highlights"] , padding="max_length" , truncation=lowerCAmelCase , max_length=128 )
UpperCAmelCase_ = inputs.input_ids
UpperCAmelCase_ = inputs.attention_mask
UpperCAmelCase_ = outputs.input_ids
UpperCAmelCase_ = outputs.input_ids.copy()
UpperCAmelCase_ = [
[-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
UpperCAmelCase_ = outputs.attention_mask
assert all(len(lowerCAmelCase ) == 512 for x in inputs.input_ids )
assert all(len(lowerCAmelCase ) == 128 for x in outputs.input_ids )
return batch
def _compute_metrics(lowerCAmelCase ):
UpperCAmelCase_ = pred.label_ids
UpperCAmelCase_ = pred.predictions
# all unnecessary tokens are removed
UpperCAmelCase_ = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = sum([int(pred_str[i] == label_str[i] ) for i in range(len(lowerCAmelCase ) )] ) / len(lowerCAmelCase )
return {"accuracy": accuracy}
# map train dataset
UpperCAmelCase_ = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase , batch_size=lowerCAmelCase , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
UpperCAmelCase_ = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=lowerCAmelCase , batch_size=lowerCAmelCase , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
UpperCAmelCase_ = self.get_auto_remove_tmp_dir()
UpperCAmelCase_ = SeqaSeqTrainingArguments(
output_dir=lowerCAmelCase , per_device_train_batch_size=lowerCAmelCase , per_device_eval_batch_size=lowerCAmelCase , predict_with_generate=lowerCAmelCase , evaluation_strategy="steps" , do_train=lowerCAmelCase , do_eval=lowerCAmelCase , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
UpperCAmelCase_ = SeqaSeqTrainer(
model=lowerCAmelCase , args=lowerCAmelCase , compute_metrics=_compute_metrics , train_dataset=lowerCAmelCase , eval_dataset=lowerCAmelCase , tokenizer=lowerCAmelCase , )
# start training
trainer.train()
| 23 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
| 23 | 1 |
import argparse
import hashlib # hashlib is only used inside the Test class
import struct
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = data
UpperCAmelCase_ = [0x6745_2301, 0xEFCD_AB89, 0x98BA_DCFE, 0x1032_5476, 0xC3D2_E1F0]
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
return ((n << b) | (n >> (32 - b))) & 0xFFFF_FFFF
def A__ ( self ):
UpperCAmelCase_ = b"\x80" + b"\x00" * (63 - (len(self.data ) + 8) % 64)
UpperCAmelCase_ = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
return padded_data
def A__ ( self ):
return [
self.padded_data[i : i + 64] for i in range(0 , len(self.padded_data ) , 64 )
]
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = list(struct.unpack(">16L" , lowerCAmelCase ) ) + [0] * 64
for i in range(16 , 80 ):
UpperCAmelCase_ = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 14] ^ w[i - 16]) , 1 )
return w
def A__ ( self ):
UpperCAmelCase_ = self.padding()
UpperCAmelCase_ = self.split_blocks()
for block in self.blocks:
UpperCAmelCase_ = self.expand_block(lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.h
for i in range(0 , 80 ):
if 0 <= i < 20:
UpperCAmelCase_ = (b & c) | ((~b) & d)
UpperCAmelCase_ = 0x5A82_7999
elif 20 <= i < 40:
UpperCAmelCase_ = b ^ c ^ d
UpperCAmelCase_ = 0x6ED9_EBA1
elif 40 <= i < 60:
UpperCAmelCase_ = (b & c) | (b & d) | (c & d)
UpperCAmelCase_ = 0x8F1B_BCDC
elif 60 <= i < 80:
UpperCAmelCase_ = b ^ c ^ d
UpperCAmelCase_ = 0xCA62_C1D6
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (
self.rotate(lowerCAmelCase , 5 ) + f + e + k + expanded_block[i] & 0xFFFF_FFFF,
a,
self.rotate(lowerCAmelCase , 30 ),
c,
d,
)
UpperCAmelCase_ = (
self.h[0] + a & 0xFFFF_FFFF,
self.h[1] + b & 0xFFFF_FFFF,
self.h[2] + c & 0xFFFF_FFFF,
self.h[3] + d & 0xFFFF_FFFF,
self.h[4] + e & 0xFFFF_FFFF,
)
return ("{:08x}" * 5).format(*self.h )
def snake_case__ ( ) -> int:
UpperCAmelCase_ = B"Test String"
assert SHAaHash(__SCREAMING_SNAKE_CASE ).final_hash() == hashlib.shaa(__SCREAMING_SNAKE_CASE ).hexdigest() # noqa: S324
def snake_case__ ( ) -> Any:
UpperCAmelCase_ = argparse.ArgumentParser(description="Process some strings or files" )
parser.add_argument(
"--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
UpperCAmelCase_ = parser.parse_args()
UpperCAmelCase_ = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
UpperCAmelCase_ = f.read()
else:
UpperCAmelCase_ = bytes(__SCREAMING_SNAKE_CASE , "utf-8" )
print(SHAaHash(__SCREAMING_SNAKE_CASE ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 0
while number > 0:
UpperCAmelCase_ = number % 10
sum_of_digits += last_digit
UpperCAmelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case__ ( __SCREAMING_SNAKE_CASE = 100 ) -> int:
UpperCAmelCase_ = factorial(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 23 | 1 |
import os
import time
import pytest
from datasets.utils.filelock import FileLock, Timeout
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ = FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ = FileLock(str(tmpdir / "foo.lock" ) )
UpperCAmelCase_ = 0.01
with locka.acquire():
with pytest.raises(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = time.time()
locka.acquire(__SCREAMING_SNAKE_CASE )
assert time.time() - _start > timeout
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Any:
UpperCAmelCase_ = "a" * 1000 + ".lock"
UpperCAmelCase_ = FileLock(str(tmpdir / filename ) )
assert locka._lock_file.endswith(".lock" )
assert not locka._lock_file.endswith(__SCREAMING_SNAKE_CASE )
assert len(os.path.basename(locka._lock_file ) ) <= 255
UpperCAmelCase_ = FileLock(tmpdir / filename )
with locka.acquire():
with pytest.raises(__SCREAMING_SNAKE_CASE ):
locka.acquire(0 )
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
from statistics import mean, stdev
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 3 ) -> list:
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = max(__SCREAMING_SNAKE_CASE )
# normalize data
return [round((x - x_min) / (x_max - x_min) , __SCREAMING_SNAKE_CASE ) for x in data]
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 3 ) -> list:
UpperCAmelCase_ = mean(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = stdev(__SCREAMING_SNAKE_CASE )
# standardize data
return [round((x - mu) / (sigma) , __SCREAMING_SNAKE_CASE ) for x in data]
| 23 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 23 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
lowerCAmelCase_ : bool = field(
default=lowercase__, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
lowerCAmelCase_ : str = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
lowerCAmelCase_ : bool = field(
default=lowercase__, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : Optional[str] = field(default=lowercase__, metadata={'help': 'The input training data file (a text file).'} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase__, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
lowerCAmelCase_ : bool = field(
default=lowercase__, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ : Optional[int] = field(
default=lowercase__, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
lowerCAmelCase_ : Optional[int] = field(
default=lowercase__, metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
lowerCAmelCase_ : bool = field(
default=lowercase__, metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
}, )
lowerCAmelCase_ : Optional[int] = field(
default=lowercase__, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
lowerCAmelCase_ : Optional[int] = field(
default=lowercase__, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
def A__ ( self ):
if self.train_file is not None:
UpperCAmelCase_ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCAmelCase_ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : PreTrainedTokenizerBase
lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = True
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[int] = None
def __call__( self , lowerCAmelCase ):
UpperCAmelCase_ = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase_ = [feature.pop(lowerCAmelCase ) for feature in features]
UpperCAmelCase_ = len(lowerCAmelCase )
UpperCAmelCase_ = len(features[0]["input_ids"] )
UpperCAmelCase_ = [
[{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase )] for feature in features
]
UpperCAmelCase_ = list(chain(*lowerCAmelCase ) )
UpperCAmelCase_ = self.tokenizer.pad(
lowerCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
UpperCAmelCase_ = {k: v.view(lowerCAmelCase , lowerCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
UpperCAmelCase_ = torch.tensor(lowerCAmelCase , dtype=torch.intaa )
return batch
def snake_case__ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase_ = training_args.get_process_log_level()
logger.setLevel(__SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(__SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(__SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCAmelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCAmelCase_ = {}
if data_args.train_file is not None:
UpperCAmelCase_ = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase_ = data_args.validation_file
UpperCAmelCase_ = data_args.train_file.split("." )[-1]
UpperCAmelCase_ = load_dataset(
__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCAmelCase_ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase_ = [f'''ending{i}''' for i in range(4 )]
UpperCAmelCase_ = "sent1"
UpperCAmelCase_ = "sent2"
if data_args.max_seq_length is None:
UpperCAmelCase_ = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
UpperCAmelCase_ = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCAmelCase_ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = [[context] * 4 for context in examples[context_name]]
UpperCAmelCase_ = examples[question_header_name]
UpperCAmelCase_ = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(__SCREAMING_SNAKE_CASE )
]
# Flatten out
UpperCAmelCase_ = list(chain(*__SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ = list(chain(*__SCREAMING_SNAKE_CASE ) )
# Tokenize
UpperCAmelCase_ = tokenizer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , 4 )] for k, v in tokenized_examples.items()}
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
UpperCAmelCase_ = raw_datasets["train"]
if data_args.max_train_samples is not None:
UpperCAmelCase_ = min(len(__SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
UpperCAmelCase_ = train_dataset.select(range(__SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
UpperCAmelCase_ = train_dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
UpperCAmelCase_ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
UpperCAmelCase_ = min(len(__SCREAMING_SNAKE_CASE ) , data_args.max_eval_samples )
UpperCAmelCase_ = eval_dataset.select(range(__SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
UpperCAmelCase_ = eval_dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCAmelCase_ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ , UpperCAmelCase_ = eval_predictions
UpperCAmelCase_ = np.argmax(__SCREAMING_SNAKE_CASE , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCAmelCase_ = Trainer(
model=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__SCREAMING_SNAKE_CASE , data_collator=__SCREAMING_SNAKE_CASE , compute_metrics=__SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
UpperCAmelCase_ = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ = last_checkpoint
UpperCAmelCase_ = trainer.train(resume_from_checkpoint=__SCREAMING_SNAKE_CASE )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase_ = train_result.metrics
UpperCAmelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__SCREAMING_SNAKE_CASE )
)
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("train" , __SCREAMING_SNAKE_CASE )
trainer.save_metrics("train" , __SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase_ = trainer.evaluate()
UpperCAmelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("eval" , __SCREAMING_SNAKE_CASE )
trainer.save_metrics("eval" , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**__SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 23 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = str(id_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = []
UpperCAmelCase_ = {} # {vertex:distance}
def __lt__( self , lowerCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def A__ ( self , lowerCAmelCase ):
self.neighbors.append(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = weight
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list:
UpperCAmelCase_ = []
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = graph[:]
while q:
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE )
q.remove(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Iterator[tuple]:
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
hq.heapify(__SCREAMING_SNAKE_CASE )
while h:
UpperCAmelCase_ = hq.heappop(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
hq.heapify(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def snake_case__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ , UpperCAmelCase_ = len(__SCREAMING_SNAKE_CASE ), len(grid[0] )
if (
min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) < 0
or row == row_length
or col == col_length
or (row, col) in visit
or grid[row][col] == 1
):
return 0
if row == row_length - 1 and col == col_length - 1:
return 1
visit.add((row, col) )
UpperCAmelCase_ = 0
count += depth_first_search(__SCREAMING_SNAKE_CASE , row + 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
count += depth_first_search(__SCREAMING_SNAKE_CASE , row - 1 , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
count += depth_first_search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , col + 1 , __SCREAMING_SNAKE_CASE )
count += depth_first_search(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , col - 1 , __SCREAMING_SNAKE_CASE )
visit.remove((row, col) )
return count
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
from __future__ import annotations
import math
import numpy as np
from numpy.linalg import norm
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> float:
return math.sqrt(sum(pow(a - b , 2 ) for a, b in zip(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) ) )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list[list[list[float] | float]]:
if dataset.ndim != value_array.ndim:
UpperCAmelCase_ = (
"Wrong input data's dimensions... "
f'''dataset : {dataset.ndim}, value_array : {value_array.ndim}'''
)
raise ValueError(__SCREAMING_SNAKE_CASE )
try:
if dataset.shape[1] != value_array.shape[1]:
UpperCAmelCase_ = (
"Wrong input data's shape... "
f'''dataset : {dataset.shape[1]}, value_array : {value_array.shape[1]}'''
)
raise ValueError(__SCREAMING_SNAKE_CASE )
except IndexError:
if dataset.ndim != value_array.ndim:
raise TypeError("Wrong shape" )
if dataset.dtype != value_array.dtype:
UpperCAmelCase_ = (
"Input data have different datatype... "
f'''dataset : {dataset.dtype}, value_array : {value_array.dtype}'''
)
raise TypeError(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = []
for value in value_array:
UpperCAmelCase_ = euclidean(__SCREAMING_SNAKE_CASE , dataset[0] )
UpperCAmelCase_ = dataset[0].tolist()
for dataset_value in dataset[1:]:
UpperCAmelCase_ = euclidean(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if dist > temp_dist:
UpperCAmelCase_ = temp_dist
UpperCAmelCase_ = dataset_value.tolist()
answer.append([vector, dist] )
return answer
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> float:
return np.dot(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) / (norm(__SCREAMING_SNAKE_CASE ) * norm(__SCREAMING_SNAKE_CASE ))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : List[str] = RetriBertTokenizer
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 1 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass(frozen=lowercase__ )
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : str
lowerCAmelCase_ : str
lowerCAmelCase_ : Optional[str] = None
lowerCAmelCase_ : Optional[str] = None
lowerCAmelCase_ : Optional[str] = None
@dataclass(frozen=lowercase__ )
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : List[int]
lowerCAmelCase_ : Optional[List[int]] = None
lowerCAmelCase_ : Optional[List[int]] = None
lowerCAmelCase_ : Optional[Union[int, float]] = None
lowerCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[InputFeatures]
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase=False , lowerCAmelCase = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = os.path.join(
lowerCAmelCase , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(lowerCAmelCase ) , lowerCAmelCase , ) , )
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
UpperCAmelCase_ = torch.load(lowerCAmelCase )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
UpperCAmelCase_ = (
processor.get_dev_examples(lowerCAmelCase ) if evaluate else processor.get_train_examples(lowerCAmelCase )
)
logger.info("Training examples: %s" , len(lowerCAmelCase ) )
UpperCAmelCase_ = hans_convert_examples_to_features(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
logger.info("Saving features into cached file %s" , lowerCAmelCase )
torch.save(self.features , lowerCAmelCase )
def __len__( self ):
return len(self.features )
def __getitem__( self , lowerCAmelCase ):
return self.features[i]
def A__ ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : List[InputFeatures]
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 128 , lowerCAmelCase=False , lowerCAmelCase = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
UpperCAmelCase_ = processor.get_dev_examples(lowerCAmelCase ) if evaluate else processor.get_train_examples(lowerCAmelCase )
UpperCAmelCase_ = hans_convert_examples_to_features(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 1_0000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(lowerCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase_ = tf.data.Dataset.from_generator(
lowerCAmelCase , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A__ ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self , lowerCAmelCase ):
return self.features[i]
def A__ ( self ):
return self.label_list
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self , lowerCAmelCase ):
return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase , "heuristics_train_set.txt" ) ) , "train" )
def A__ ( self , lowerCAmelCase ):
return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase , "heuristics_evaluation_set.txt" ) ) , "dev" )
def A__ ( self ):
return ["contradiction", "entailment", "neutral"]
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = []
for i, line in enumerate(lowerCAmelCase ):
if i == 0:
continue
UpperCAmelCase_ = "%s-%s" % (set_type, line[0])
UpperCAmelCase_ = line[5]
UpperCAmelCase_ = line[6]
UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
UpperCAmelCase_ = line[0]
examples.append(InputExample(guid=lowerCAmelCase , text_a=lowerCAmelCase , text_b=lowerCAmelCase , label=lowerCAmelCase , pairID=lowerCAmelCase ) )
return examples
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> int:
UpperCAmelCase_ = {label: i for i, label in enumerate(__SCREAMING_SNAKE_CASE )}
UpperCAmelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(__SCREAMING_SNAKE_CASE ) , desc="convert examples to features" ):
if ex_index % 1_0000 == 0:
logger.info("Writing example %d" % (ex_index) )
UpperCAmelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length" , truncation=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase_ = int(example.pairID )
features.append(InputFeatures(**__SCREAMING_SNAKE_CASE , label=__SCREAMING_SNAKE_CASE , pairID=__SCREAMING_SNAKE_CASE ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f'''guid: {example}''' )
logger.info(f'''features: {features[i]}''' )
return features
SCREAMING_SNAKE_CASE = {
"hans": 3,
}
SCREAMING_SNAKE_CASE = {
"hans": HansProcessor,
}
| 23 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : int = ['input_ids', 'attention_mask']
lowerCAmelCase_ : str = DistilBertTokenizer
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 1 |
import collections
import os
from typing import List, Optional, Tuple
from transformers.utils import is_jieba_available, requires_backends
if is_jieba_available():
import jieba
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"openbmb/cpm-ant-10b": "https://huggingface.co/openbmb/cpm-ant-10b/blob/main/vocab.txt",
},
}
SCREAMING_SNAKE_CASE = {
"openbmb/cpm-ant-10b": 1024,
}
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Dict:
UpperCAmelCase_ = collections.OrderedDict()
with open(__SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" ) as reader:
UpperCAmelCase_ = reader.readlines()
for index, token in enumerate(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = token.rstrip("\n" )
UpperCAmelCase_ = index
return vocab
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase="<unk>" , lowerCAmelCase=200 ):
UpperCAmelCase_ = vocab
UpperCAmelCase_ = unk_token
UpperCAmelCase_ = max_input_chars_per_word
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = list(lowerCAmelCase )
if len(lowerCAmelCase ) > self.max_input_chars_per_word:
return [self.unk_token]
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
while start < len(lowerCAmelCase ):
UpperCAmelCase_ = len(lowerCAmelCase )
UpperCAmelCase_ = None
while start < end:
UpperCAmelCase_ = "".join(chars[start:end] )
if substr in self.vocab:
UpperCAmelCase_ = substr
break
end -= 1
if cur_substr is None:
sub_tokens.append(self.unk_token )
start += 1
else:
sub_tokens.append(lowerCAmelCase )
UpperCAmelCase_ = end
return sub_tokens
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = VOCAB_FILES_NAMES
lowerCAmelCase_ : Tuple = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : str = ['input_ids', 'attention_mask']
lowerCAmelCase_ : Optional[int] = False
def __init__( self , lowerCAmelCase , lowerCAmelCase="<d>" , lowerCAmelCase="</d>" , lowerCAmelCase="<s>" , lowerCAmelCase="</s>" , lowerCAmelCase="<pad>" , lowerCAmelCase="<unk>" , lowerCAmelCase="</n>" , lowerCAmelCase="</_>" , lowerCAmelCase="left" , **lowerCAmelCase , ):
requires_backends(self , ["jieba"] )
super().__init__(
bod_token=lowerCAmelCase , eod_token=lowerCAmelCase , bos_token=lowerCAmelCase , eos_token=lowerCAmelCase , pad_token=lowerCAmelCase , unk_token=lowerCAmelCase , line_token=lowerCAmelCase , space_token=lowerCAmelCase , padding_side=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = bod_token
UpperCAmelCase_ = eod_token
UpperCAmelCase_ = load_vocab(lowerCAmelCase )
UpperCAmelCase_ = self.encoder[space_token]
UpperCAmelCase_ = self.encoder[line_token]
del self.encoder[space_token]
del self.encoder[line_token]
UpperCAmelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase : x[1] ) )
UpperCAmelCase_ = {v: k for k, v in self.encoder.items()}
UpperCAmelCase_ = WordpieceTokenizer(vocab=self.encoder , unk_token=self.unk_token )
@property
def A__ ( self ):
return self.encoder[self.bod_token]
@property
def A__ ( self ):
return self.encoder[self.eod_token]
@property
def A__ ( self ):
return self.encoder["\n"]
@property
def A__ ( self ):
return len(self.encoder )
def A__ ( self ):
return dict(self.encoder , **self.added_tokens_encoder )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
for x in jieba.cut(lowerCAmelCase , cut_all=lowerCAmelCase ):
output_tokens.extend(self.wordpiece_tokenizer.tokenize(lowerCAmelCase ) )
return output_tokens
def A__ ( self , lowerCAmelCase , **lowerCAmelCase ):
UpperCAmelCase_ = [i for i in token_ids if i >= 0]
UpperCAmelCase_ = [
x for x in token_ids if x != self.pad_token_id and x != self.eos_token_id and x != self.bos_token_id
]
return super()._decode(lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
return token in self.encoder
def A__ ( self , lowerCAmelCase ):
return "".join(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
return self.encoder.get(lowerCAmelCase , self.encoder.get(self.unk_token ) )
def A__ ( self , lowerCAmelCase ):
return self.decoder.get(lowerCAmelCase , self.unk_token )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
if os.path.isdir(lowerCAmelCase ):
UpperCAmelCase_ = os.path.join(
lowerCAmelCase , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
else:
UpperCAmelCase_ = (filename_prefix + "-" if filename_prefix else "") + save_directory
UpperCAmelCase_ = 0
if " " in self.encoder:
UpperCAmelCase_ = self.encoder[" "]
del self.encoder[" "]
if "\n" in self.encoder:
UpperCAmelCase_ = self.encoder["\n"]
del self.encoder["\n"]
UpperCAmelCase_ = collections.OrderedDict(sorted(self.encoder.items() , key=lambda lowerCAmelCase : x[1] ) )
with open(lowerCAmelCase , "w" , encoding="utf-8" ) as writer:
for token, token_index in self.encoder.items():
if index != token_index:
logger.warning(
f'''Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive.'''
" Please check that the vocabulary is not corrupted!" )
UpperCAmelCase_ = token_index
writer.write(token + "\n" )
index += 1
return (vocab_file,)
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
if token_ids_a is None:
return [self.bos_token_id] + token_ids_a
return [self.bos_token_id] + token_ids_a + [self.bos_token_id] + token_ids_a
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=lowerCAmelCase , token_ids_a=lowerCAmelCase , already_has_special_tokens=lowerCAmelCase )
if token_ids_a is not None:
return [1] + ([0] * len(lowerCAmelCase )) + [1] + ([0] * len(lowerCAmelCase ))
return [1] + ([0] * len(lowerCAmelCase ))
| 23 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.array:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
UpperCAmelCase_ = "f32le"
UpperCAmelCase_ = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase_ = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
UpperCAmelCase_ = output_stream[0]
UpperCAmelCase_ = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "f32le" , ) -> Dict:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
if format_for_conversion == "s16le":
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCAmelCase_ = platform.system()
if system == "Linux":
UpperCAmelCase_ = "alsa"
UpperCAmelCase_ = "default"
elif system == "Darwin":
UpperCAmelCase_ = "avfoundation"
UpperCAmelCase_ = ":0"
elif system == "Windows":
UpperCAmelCase_ = "dshow"
UpperCAmelCase_ = "default"
UpperCAmelCase_ = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase_ = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "f32le" , ) -> int:
if stream_chunk_s is not None:
UpperCAmelCase_ = stream_chunk_s
else:
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
UpperCAmelCase_ = np.intaa
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = np.floataa
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCAmelCase_ = chunk_length_s / 6
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
UpperCAmelCase_ = [stride_length_s, stride_length_s]
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase_ = datetime.datetime.now()
UpperCAmelCase_ = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
UpperCAmelCase_ = np.frombuffer(item["raw"] , dtype=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
UpperCAmelCase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Dict:
UpperCAmelCase_ = B""
UpperCAmelCase_ , UpperCAmelCase_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCAmelCase_ = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
UpperCAmelCase_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
UpperCAmelCase_ = (_stride_left, stride_right)
UpperCAmelCase_ = {"raw": acc[:chunk_len], "stride": stride}
if stream:
UpperCAmelCase_ = False
yield item
UpperCAmelCase_ = stride_left
UpperCAmelCase_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
UpperCAmelCase_ = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
UpperCAmelCase_ = False
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ = 2**24 # 16Mo
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
UpperCAmelCase_ = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCAmelCase_ = "-" if number.startswith("-" ) else ""
UpperCAmelCase_ = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 23 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 768 , ):
super().__init__()
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.ones(1 , lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase = None , lowerCAmelCase = None , ):
UpperCAmelCase_ = nn.Parameter(self.mean.to(lowerCAmelCase ).to(lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(self.std.to(lowerCAmelCase ).to(lowerCAmelCase ) )
return self
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds * self.std) + self.mean
return embeds
| 23 | 1 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def snake_case__ ( ) -> List[Any]:
with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT ):
with pytest.raises(__SCREAMING_SNAKE_CASE ):
requests.request("GET" , "https://huggingface.co" )
with pytest.raises(requests.exceptions.ConnectTimeout ):
requests.request("GET" , "https://huggingface.co" , timeout=1.0 )
@pytest.mark.integration
def snake_case__ ( ) -> int:
with offline(OfflineSimulationMode.CONNECTION_FAILS ):
with pytest.raises(requests.exceptions.ConnectionError ):
requests.request("GET" , "https://huggingface.co" )
def snake_case__ ( ) -> List[Any]:
with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1 ):
with pytest.raises(__SCREAMING_SNAKE_CASE ):
http_head("https://huggingface.co" )
| 23 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
if prompt is not None:
UpperCAmelCase_ = prompt
if generate_kwargs is not None:
UpperCAmelCase_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCAmelCase_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
UpperCAmelCase_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowerCAmelCase , **lowerCAmelCase ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = load_image(lowerCAmelCase )
if prompt is not None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
"Note also that one single text can be provided for conditional image to text generation." )
UpperCAmelCase_ = self.model.config.model_type
if model_type == "git":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(text=lowerCAmelCase , add_special_tokens=lowerCAmelCase ).input_ids
UpperCAmelCase_ = [self.tokenizer.cls_token_id] + input_ids
UpperCAmelCase_ = torch.tensor(lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , header_text=lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCAmelCase_ = None
return model_inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , lowerCAmelCase )
and all(x is None for x in model_inputs["input_ids"] )
):
UpperCAmelCase_ = None
if generate_kwargs is None:
UpperCAmelCase_ = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCAmelCase_ = model_inputs.pop(self.model.main_input_name )
UpperCAmelCase_ = self.model.generate(lowerCAmelCase , **lowerCAmelCase , **lowerCAmelCase )
return model_outputs
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
for output_ids in model_outputs:
UpperCAmelCase_ = {
"generated_text": self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , )
}
records.append(lowerCAmelCase )
return records
| 23 | 1 |
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, PegasusConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFPegasusForConditionalGeneration, TFPegasusModel
@require_tf
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : Dict = PegasusConfig
lowerCAmelCase_ : Any = {}
lowerCAmelCase_ : Optional[int] = 'gelu'
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=40 , lowerCAmelCase=2 , lowerCAmelCase=1 , lowerCAmelCase=0 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = eos_token_id
UpperCAmelCase_ = pad_token_id
UpperCAmelCase_ = bos_token_id
def A__ ( self ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
UpperCAmelCase_ = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
UpperCAmelCase_ = tf.concat([input_ids, eos_tensor] , axis=1 )
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
UpperCAmelCase_ = prepare_pegasus_inputs_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return config, inputs_dict
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = TFPegasusModel(config=lowerCAmelCase ).get_decoder()
UpperCAmelCase_ = inputs_dict["input_ids"]
UpperCAmelCase_ = input_ids[:1, :]
UpperCAmelCase_ = inputs_dict["attention_mask"][:1, :]
UpperCAmelCase_ = inputs_dict["head_mask"]
UpperCAmelCase_ = 1
# first forward pass
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , head_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
UpperCAmelCase_ = tf.concat([input_ids, next_tokens] , axis=-1 )
UpperCAmelCase_ = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase )[0]
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
UpperCAmelCase_ = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx]
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(lowerCAmelCase , lowerCAmelCase , rtol=1e-3 )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=None , ) -> Dict:
if attention_mask is None:
UpperCAmelCase_ = tf.cast(tf.math.not_equal(__SCREAMING_SNAKE_CASE , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
UpperCAmelCase_ = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
UpperCAmelCase_ = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
UpperCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
UpperCAmelCase_ = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
@require_tf
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Any = (TFPegasusForConditionalGeneration, TFPegasusModel) if is_tf_available() else ()
lowerCAmelCase_ : Optional[int] = (TFPegasusForConditionalGeneration,) if is_tf_available() else ()
lowerCAmelCase_ : Optional[Any] = (
{
'conversational': TFPegasusForConditionalGeneration,
'feature-extraction': TFPegasusModel,
'summarization': TFPegasusForConditionalGeneration,
'text2text-generation': TFPegasusForConditionalGeneration,
'translation': TFPegasusForConditionalGeneration,
}
if is_tf_available()
else {}
)
lowerCAmelCase_ : int = True
lowerCAmelCase_ : Tuple = False
lowerCAmelCase_ : List[str] = False
def A__ ( self ):
UpperCAmelCase_ = TFPegasusModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase )
def A__ ( self ):
self.config_tester.run_common_tests()
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*lowerCAmelCase )
@require_sentencepiece
@require_tokenizers
@require_tf
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = [
' PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.',
' The London trio are up for best UK act and best album, as well as getting two nominations in the best song category."We got told like this morning \'Oh I think you\'re nominated\'", said Dappy."And I was like \'Oh yeah, which one?\' And now we\'ve got nominated for four awards. I mean, wow!"Bandmate Fazer added: "We thought it\'s best of us to come down and mingle with everyone and say hello to the cameras. And now we find we\'ve got four nominations."The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn\'t be too disappointed if they didn\'t win this time around."At the end of the day we\'re grateful to be where we are in our careers."If it don\'t happen then it don\'t happen - live to fight another day and keep on making albums and hits for the fans."Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers\' All These Things That I\'ve Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year\'s Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border."We just done Edinburgh the other day," said Dappy."We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!" ',
]
lowerCAmelCase_ : str = [
'California\'s largest electricity provider has cut power to hundreds of thousands of customers in an effort to'
' reduce the risk of wildfires.',
'N-Dubz have revealed they\'re "grateful" to have been nominated for four Mobo Awards.',
] # differs slightly from pytorch, likely due to numerical differences in linear layers
lowerCAmelCase_ : Any = 'google/pegasus-xsum'
@cached_property
def A__ ( self ):
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def A__ ( self ):
UpperCAmelCase_ = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def A__ ( self , **lowerCAmelCase ):
UpperCAmelCase_ = self.translate_src_text(**lowerCAmelCase )
assert self.expected_text == generated_words
def A__ ( self , **lowerCAmelCase ):
UpperCAmelCase_ = self.tokenizer(self.src_text , **lowerCAmelCase , padding=lowerCAmelCase , return_tensors="tf" )
UpperCAmelCase_ = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 , use_cache=lowerCAmelCase , )
UpperCAmelCase_ = self.tokenizer.batch_decode(generated_ids.numpy() , skip_special_tokens=lowerCAmelCase )
return generated_words
@slow
def A__ ( self ):
self._assert_generated_batch_equal_expected()
| 23 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = TextToVideoSDPipeline
lowerCAmelCase_ : Dict = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase_ : Optional[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = TextToVideoSDPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase_ = "np"
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCAmelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A__ ( self ):
pass
def A__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 23 | 1 |
from math import factorial
def snake_case__ ( __SCREAMING_SNAKE_CASE = 20 ) -> int:
UpperCAmelCase_ = 2 * n # middle entry of odd rows starting at row 3 is the solution for n = 1,
# 2, 3,...
UpperCAmelCase_ = n // 2
return int(factorial(__SCREAMING_SNAKE_CASE ) / (factorial(__SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
SCREAMING_SNAKE_CASE = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = [1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 0, 0, 0
UpperCAmelCase_ = ugly_nums[ia] * 2
UpperCAmelCase_ = ugly_nums[ia] * 3
UpperCAmelCase_ = ugly_nums[ia] * 5
for _ in range(1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
ugly_nums.append(__SCREAMING_SNAKE_CASE )
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 23 | 1 |
import logging
import os
from typing import List, TextIO, Union
from conllu import parse_incr
from utils_ner import InputExample, Split, TokenClassificationTask
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , lowerCAmelCase=-1 ):
# in NER datasets, the last column is usually reserved for NER label
UpperCAmelCase_ = label_idx
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = mode.value
UpperCAmelCase_ = os.path.join(lowerCAmelCase , f'''{mode}.txt''' )
UpperCAmelCase_ = 1
UpperCAmelCase_ = []
with open(lowerCAmelCase , encoding="utf-8" ) as f:
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for line in f:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCAmelCase , labels=lowerCAmelCase ) )
guid_index += 1
UpperCAmelCase_ = []
UpperCAmelCase_ = []
else:
UpperCAmelCase_ = line.split(" " )
words.append(splits[0] )
if len(lowerCAmelCase ) > 1:
labels.append(splits[self.label_idx].replace("\n" , "" ) )
else:
# Examples could have no label for mode = "test"
labels.append("O" )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCAmelCase , labels=lowerCAmelCase ) )
return examples
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = 0
for line in test_input_reader:
if line.startswith("-DOCSTART-" ) or line == "" or line == "\n":
writer.write(lowerCAmelCase )
if not preds_list[example_id]:
example_id += 1
elif preds_list[example_id]:
UpperCAmelCase_ = line.split()[0] + " " + preds_list[example_id].pop(0 ) + "\n"
writer.write(lowerCAmelCase )
else:
logger.warning("Maximum sequence length exceeded: No prediction for '%s'." , line.split()[0] )
def A__ ( self , lowerCAmelCase ):
if path:
with open(lowerCAmelCase , "r" ) as f:
UpperCAmelCase_ = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase_ = ["O"] + labels
return labels
else:
return ["O", "B-MISC", "I-MISC", "B-PER", "I-PER", "B-ORG", "I-ORG", "B-LOC", "I-LOC"]
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self ):
# in CONLL2003 dataset chunk column is second-to-last
super().__init__(label_idx=-2 )
def A__ ( self , lowerCAmelCase ):
if path:
with open(lowerCAmelCase , "r" ) as f:
UpperCAmelCase_ = f.read().splitlines()
if "O" not in labels:
UpperCAmelCase_ = ["O"] + labels
return labels
else:
return [
"O",
"B-ADVP",
"B-INTJ",
"B-LST",
"B-PRT",
"B-NP",
"B-SBAR",
"B-VP",
"B-ADJP",
"B-CONJP",
"B-PP",
"I-ADVP",
"I-INTJ",
"I-LST",
"I-PRT",
"I-NP",
"I-SBAR",
"I-VP",
"I-ADJP",
"I-CONJP",
"I-PP",
]
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = mode.value
UpperCAmelCase_ = os.path.join(lowerCAmelCase , f'''{mode}.txt''' )
UpperCAmelCase_ = 1
UpperCAmelCase_ = []
with open(lowerCAmelCase , encoding="utf-8" ) as f:
for sentence in parse_incr(lowerCAmelCase ):
UpperCAmelCase_ = []
UpperCAmelCase_ = []
for token in sentence:
words.append(token["form"] )
labels.append(token["upos"] )
assert len(lowerCAmelCase ) == len(lowerCAmelCase )
if words:
examples.append(InputExample(guid=f'''{mode}-{guid_index}''' , words=lowerCAmelCase , labels=lowerCAmelCase ) )
guid_index += 1
return examples
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = 0
for sentence in parse_incr(lowerCAmelCase ):
UpperCAmelCase_ = preds_list[example_id]
UpperCAmelCase_ = ""
for token in sentence:
out += f'''{token['form']} ({token['upos']}|{s_p.pop(0 )}) '''
out += "\n"
writer.write(lowerCAmelCase )
example_id += 1
def A__ ( self , lowerCAmelCase ):
if path:
with open(lowerCAmelCase , "r" ) as f:
return f.read().splitlines()
else:
return [
"ADJ",
"ADP",
"ADV",
"AUX",
"CCONJ",
"DET",
"INTJ",
"NOUN",
"NUM",
"PART",
"PRON",
"PROPN",
"PUNCT",
"SCONJ",
"SYM",
"VERB",
"X",
]
| 23 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=224 , lowerCAmelCase=1000 , lowerCAmelCase=[3, 3, 6, 4] , lowerCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = layer_depths
UpperCAmelCase_ = embed_dims
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1e-5 , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ):
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : int = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Optional[Any] = False
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
def _config_zero_init(lowerCAmelCase ):
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1e-1_0 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
UpperCAmelCase_ = _config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 23 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = {}
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(__SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(__SCREAMING_SNAKE_CASE )
}
for i in range(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
UpperCAmelCase_ = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
UpperCAmelCase_ = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
UpperCAmelCase_ = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": f'''down.{i}.block''', "new": f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
UpperCAmelCase_ = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": f'''mid.block_{i}''', "new": f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
conv_attn_to_linear(__SCREAMING_SNAKE_CASE )
for i in range(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = num_up_blocks - 1 - i
UpperCAmelCase_ = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
UpperCAmelCase_ = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
UpperCAmelCase_ = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
UpperCAmelCase_ = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": f'''up.{block_id}.block''', "new": f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
UpperCAmelCase_ = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": f'''mid.block_{i}''', "new": f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
conv_attn_to_linear(__SCREAMING_SNAKE_CASE )
return new_checkpoint
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> List[str]:
# Only support V1
UpperCAmelCase_ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase_ = io.BytesIO(r.content )
UpperCAmelCase_ = OmegaConf.load(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = 512
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase_ = {}
with safe_open(__SCREAMING_SNAKE_CASE , framework="pt" , device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase_ = f.get_tensor(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = torch.load(__SCREAMING_SNAKE_CASE , map_location=__SCREAMING_SNAKE_CASE )["state_dict"]
# Convert the VAE model.
UpperCAmelCase_ = create_vae_diffusers_config(__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = AutoencoderKL(**__SCREAMING_SNAKE_CASE )
vae.load_state_dict(__SCREAMING_SNAKE_CASE )
vae.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
SCREAMING_SNAKE_CASE = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
| 23 |
import math
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]:
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) ) # Size of every segment
UpperCAmelCase_ = [True] * (end + 1)
UpperCAmelCase_ = []
while start <= end:
if temp[start] is True:
in_prime.append(__SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
start += 1
prime += in_prime
UpperCAmelCase_ = end + 1
UpperCAmelCase_ = min(2 * end , __SCREAMING_SNAKE_CASE )
while low <= n:
UpperCAmelCase_ = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase_ = math.floor(low / each ) * each
if t < low:
t += each
for j in range(__SCREAMING_SNAKE_CASE , high + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase_ = high + 1
UpperCAmelCase_ = min(high + end , __SCREAMING_SNAKE_CASE )
return prime
print(sieve(10**6))
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = [0] * len(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = []
UpperCAmelCase_ = [1] * len(__SCREAMING_SNAKE_CASE )
for values in graph.values():
for i in values:
indegree[i] += 1
for i in range(len(__SCREAMING_SNAKE_CASE ) ):
if indegree[i] == 0:
queue.append(__SCREAMING_SNAKE_CASE )
while queue:
UpperCAmelCase_ = queue.pop(0 )
for x in graph[vertex]:
indegree[x] -= 1
if long_dist[vertex] + 1 > long_dist[x]:
UpperCAmelCase_ = long_dist[vertex] + 1
if indegree[x] == 0:
queue.append(__SCREAMING_SNAKE_CASE )
print(max(__SCREAMING_SNAKE_CASE ) )
# Adjacency list of Graph
SCREAMING_SNAKE_CASE = {0: [2, 3, 4], 1: [2, 7], 2: [5], 3: [5, 7], 4: [7], 5: [6], 6: [7], 7: []}
longest_distance(graph)
| 23 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : torch.FloatTensor
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ):
UpperCAmelCase_ = {}
def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return processors
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
self.set_attn_processor(AttnProcessor() )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 23 | 1 |
import math
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__SCREAMING_SNAKE_CASE ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def snake_case__ ( __SCREAMING_SNAKE_CASE = 1_0001 ) -> int:
try:
UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE )
except (TypeError, ValueError):
raise TypeError("Parameter nth must be int or castable to int." ) from None
if nth <= 0:
raise ValueError("Parameter nth must be greater than or equal to one." )
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
while len(__SCREAMING_SNAKE_CASE ) < nth:
if is_prime(__SCREAMING_SNAKE_CASE ):
primes.append(__SCREAMING_SNAKE_CASE )
num += 1
else:
num += 1
return primes[len(__SCREAMING_SNAKE_CASE ) - 1]
if __name__ == "__main__":
print(f'''{solution() = }''')
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE = 10 , __SCREAMING_SNAKE_CASE = 22 ) -> int:
UpperCAmelCase_ = range(1 , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = range(1 , __SCREAMING_SNAKE_CASE )
return sum(
1 for power in powers for base in bases if len(str(base**power ) ) == power )
if __name__ == "__main__":
print(f'''{solution(10, 22) = }''')
| 23 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = 'xlm-roberta'
def __init__( self , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
| 23 | 1 |
from typing import TYPE_CHECKING
from ....utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
"configuration_mctct": ["MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MCTCTConfig"],
"feature_extraction_mctct": ["MCTCTFeatureExtractor"],
"processing_mctct": ["MCTCTProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MCTCTForCTC",
"MCTCTModel",
"MCTCTPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mctct import MCTCT_PRETRAINED_CONFIG_ARCHIVE_MAP, MCTCTConfig
from .feature_extraction_mctct import MCTCTFeatureExtractor
from .processing_mctct import MCTCTProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mctct import MCTCT_PRETRAINED_MODEL_ARCHIVE_LIST, MCTCTForCTC, MCTCTModel, MCTCTPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCAmelCase_ = "-" if number.startswith("-" ) else ""
UpperCAmelCase_ = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
| 23 | 1 |
SCREAMING_SNAKE_CASE = "Input must be a string of 8 numbers plus letter"
SCREAMING_SNAKE_CASE = "TRWAGMYFPDXBNJZSQVHLCKE"
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> bool:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = f'''Expected string as input, found {type(__SCREAMING_SNAKE_CASE ).__name__}'''
raise TypeError(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = spanish_id.replace("-" , "" ).upper()
if len(__SCREAMING_SNAKE_CASE ) != 9:
raise ValueError(__SCREAMING_SNAKE_CASE )
try:
UpperCAmelCase_ = int(spanish_id_clean[0:8] )
UpperCAmelCase_ = spanish_id_clean[8]
except ValueError as ex:
raise ValueError(__SCREAMING_SNAKE_CASE ) from ex
if letter.isdigit():
raise ValueError(__SCREAMING_SNAKE_CASE )
return letter == LOOKUP_LETTERS[number % 23]
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase ).to(lowerCAmelCase )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = model(input_ids.to(lowerCAmelCase ) , labels=labels.to(lowerCAmelCase ) ).loss
UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list:
UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE )
if n_element < 1:
UpperCAmelCase_ = ValueError("a should be a positive number" )
raise my_error
UpperCAmelCase_ = [1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = (0, 0, 0)
UpperCAmelCase_ = 1
while index < n_element:
while hamming_list[i] * 2 <= hamming_list[-1]:
i += 1
while hamming_list[j] * 3 <= hamming_list[-1]:
j += 1
while hamming_list[k] * 5 <= hamming_list[-1]:
k += 1
hamming_list.append(
min(hamming_list[i] * 2 , hamming_list[j] * 3 , hamming_list[k] * 5 ) )
index += 1
return hamming_list
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = input("Enter the last number (nth term) of the Hamming Number Series: ")
print("Formula of Hamming Number Series => 2^i * 3^j * 5^k")
SCREAMING_SNAKE_CASE = hamming(int(n))
print("-----------------------------------------------------")
print(f'''The list with nth numbers is: {hamming_numbers}''')
print("-----------------------------------------------------")
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
UpperCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 23 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
SCREAMING_SNAKE_CASE = {
"configuration_tapas": ["TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP", "TapasConfig"],
"tokenization_tapas": ["TapasTokenizer"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TapasForMaskedLM",
"TapasForQuestionAnswering",
"TapasForSequenceClassification",
"TapasModel",
"TapasPreTrainedModel",
"load_tf_weights_in_tapas",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFTapasForMaskedLM",
"TFTapasForQuestionAnswering",
"TFTapasForSequenceClassification",
"TFTapasModel",
"TFTapasPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_tapas import TAPAS_PRETRAINED_CONFIG_ARCHIVE_MAP, TapasConfig
from .tokenization_tapas import TapasTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tapas import (
TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TapasForMaskedLM,
TapasForQuestionAnswering,
TapasForSequenceClassification,
TapasModel,
TapasPreTrainedModel,
load_tf_weights_in_tapas,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_tapas import (
TF_TAPAS_PRETRAINED_MODEL_ARCHIVE_LIST,
TFTapasForMaskedLM,
TFTapasForQuestionAnswering,
TFTapasForSequenceClassification,
TFTapasModel,
TFTapasPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
if do_resize and size is None or resample is None:
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
| 23 | 1 |
from __future__ import annotations
import math
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]:
if num <= 0:
UpperCAmelCase_ = f'''{num}: Invalid input, please enter a positive integer.'''
raise ValueError(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [True] * (num + 1)
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) )
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(__SCREAMING_SNAKE_CASE )
# Set multiples of start be False
for i in range(start * start , num + 1 , __SCREAMING_SNAKE_CASE ):
if sieve[i] is True:
UpperCAmelCase_ = False
start += 1
for j in range(end + 1 , num + 1 ):
if sieve[j] is True:
prime.append(__SCREAMING_SNAKE_CASE )
return prime
if __name__ == "__main__":
print(prime_sieve(int(input("Enter a positive integer: ").strip())))
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 0
while number > 0:
UpperCAmelCase_ = number % 10
sum_of_digits += last_digit
UpperCAmelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case__ ( __SCREAMING_SNAKE_CASE = 100 ) -> int:
UpperCAmelCase_ = factorial(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 23 | 1 |
from __future__ import annotations
from collections.abc import Iterable, Iterator
from dataclasses import dataclass
SCREAMING_SNAKE_CASE = (3, 9, -11, 0, 7, 5, 1, -1)
SCREAMING_SNAKE_CASE = (4, 6, 2, 0, 8, 10, 3, -2)
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : int
lowerCAmelCase_ : Node | None
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = None
for i in sorted(lowerCAmelCase , reverse=lowerCAmelCase ):
UpperCAmelCase_ = Node(lowerCAmelCase , self.head )
def __iter__( self ):
UpperCAmelCase_ = self.head
while node:
yield node.data
UpperCAmelCase_ = node.next_node
def __len__( self ):
return sum(1 for _ in self )
def __str__( self ):
return " -> ".join([str(lowerCAmelCase ) for node in self] )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> SortedLinkedList:
return SortedLinkedList(list(__SCREAMING_SNAKE_CASE ) + list(__SCREAMING_SNAKE_CASE ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
SCREAMING_SNAKE_CASE = SortedLinkedList
print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 23 | 1 |
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
SCREAMING_SNAKE_CASE = "src/transformers"
SCREAMING_SNAKE_CASE = "docs/source/en/tasks"
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
with open(__SCREAMING_SNAKE_CASE , "r" , encoding="utf-8" , newline="\n" ) as f:
UpperCAmelCase_ = f.readlines()
# Find the start prompt.
UpperCAmelCase_ = 0
while not lines[start_index].startswith(__SCREAMING_SNAKE_CASE ):
start_index += 1
start_index += 1
UpperCAmelCase_ = start_index
while not lines[end_index].startswith(__SCREAMING_SNAKE_CASE ):
end_index += 1
end_index -= 1
while len(lines[start_index] ) <= 1:
start_index += 1
while len(lines[end_index] ) <= 1:
end_index -= 1
end_index += 1
return "".join(lines[start_index:end_index] ), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
SCREAMING_SNAKE_CASE = direct_transformers_import(TRANSFORMERS_PATH)
SCREAMING_SNAKE_CASE = {
"asr.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
"audio_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
"language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
"image_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
"masked_language_modeling.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
"multiple_choice.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
"object_detection.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
"question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
"semantic_segmentation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
"sequence_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
"summarization.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"token_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
"translation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
"video_classification.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
"document_question_answering.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
"monocular_depth_estimation.md": transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SCREAMING_SNAKE_CASE = {
"summarization.md": ("nllb",),
"translation.md": ("nllb",),
}
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[Any]:
UpperCAmelCase_ = TASK_GUIDE_TO_MODELS[task_guide]
UpperCAmelCase_ = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(__SCREAMING_SNAKE_CASE , set() )
UpperCAmelCase_ = {
code: name
for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
if (code in model_maping_names or code in special_model_types)
}
return ", ".join([f'''[{name}](../model_doc/{code})''' for code, name in model_names.items()] ) + "\n"
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Dict:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = _find_text_in_file(
filename=os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->" , end_prompt="<!--End of the generated tip-->" , )
UpperCAmelCase_ = get_model_list_for_task(__SCREAMING_SNAKE_CASE )
if current_list != new_list:
if overwrite:
with open(os.path.join(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , "w" , encoding="utf-8" , newline="\n" ) as f:
f.writelines(lines[:start_index] + [new_list] + lines[end_index:] )
else:
raise ValueError(
f'''The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`'''
" to fix this." )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
SCREAMING_SNAKE_CASE = parser.parse_args()
for task_guide in TASK_GUIDE_TO_MODELS.keys():
check_model_list_for_task(task_guide, args.fix_and_overwrite)
| 23 |
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = str(id_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = []
UpperCAmelCase_ = {} # {vertex:distance}
def __lt__( self , lowerCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def A__ ( self , lowerCAmelCase ):
self.neighbors.append(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = weight
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list:
UpperCAmelCase_ = []
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = graph[:]
while q:
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE )
q.remove(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Iterator[tuple]:
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
hq.heapify(__SCREAMING_SNAKE_CASE )
while h:
UpperCAmelCase_ = hq.heappop(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
hq.heapify(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def snake_case__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 1 |
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TextClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
SCREAMING_SNAKE_CASE = {"LayoutLMv2Config", "LayoutLMv3Config"}
@is_pipeline_test
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
lowerCAmelCase_ : Optional[int] = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
lowerCAmelCase_ : Any = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
lowerCAmelCase_ : int = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
@require_torch
def A__ ( self ):
UpperCAmelCase_ = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" )
UpperCAmelCase_ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
UpperCAmelCase_ = text_classifier("This is great !" , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}] )
UpperCAmelCase_ = text_classifier(["This is great !", "This is bad"] , top_k=2 )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
UpperCAmelCase_ = text_classifier("This is great !" , top_k=1 )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
# Legacy behavior
UpperCAmelCase_ = text_classifier("This is great !" , return_all_scores=lowerCAmelCase )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
UpperCAmelCase_ = text_classifier("This is great !" , return_all_scores=lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}]] )
UpperCAmelCase_ = text_classifier(["This is great !", "Something else"] , return_all_scores=lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
[{"label": "LABEL_0", "score": 0.504}, {"label": "LABEL_1", "score": 0.496}],
] , )
UpperCAmelCase_ = text_classifier(["This is great !", "Something else"] , return_all_scores=lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [
{"label": "LABEL_0", "score": 0.504},
{"label": "LABEL_0", "score": 0.504},
] , )
@require_torch
def A__ ( self ):
import torch
UpperCAmelCase_ = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="pt" , device=torch.device("cpu" ) , )
UpperCAmelCase_ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
@require_tf
def A__ ( self ):
UpperCAmelCase_ = pipeline(
task="text-classification" , model="hf-internal-testing/tiny-random-distilbert" , framework="tf" )
UpperCAmelCase_ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "LABEL_0", "score": 0.504}] )
@slow
@require_torch
def A__ ( self ):
UpperCAmelCase_ = pipeline("text-classification" )
UpperCAmelCase_ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
UpperCAmelCase_ = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
UpperCAmelCase_ = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "POSITIVE", "score": 0.988}] )
@slow
@require_tf
def A__ ( self ):
UpperCAmelCase_ = pipeline("text-classification" , framework="tf" )
UpperCAmelCase_ = text_classifier("This is great !" )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "POSITIVE", "score": 1.0}] )
UpperCAmelCase_ = text_classifier("This is bad !" )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "NEGATIVE", "score": 1.0}] )
UpperCAmelCase_ = text_classifier("Birds are a type of animal" )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": "POSITIVE", "score": 0.988}] )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = TextClassificationPipeline(model=lowerCAmelCase , tokenizer=lowerCAmelCase )
return text_classifier, ["HuggingFace is in", "This is another test"]
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = text_classifier.model
# Small inputs because BartTokenizer tiny has maximum position embeddings = 22
UpperCAmelCase_ = "HuggingFace is in"
UpperCAmelCase_ = text_classifier(lowerCAmelCase )
self.assertEqual(nested_simplify(lowerCAmelCase ) , [{"label": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase )}] )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
UpperCAmelCase_ = ["HuggingFace is in ", "Paris is in France"]
UpperCAmelCase_ = text_classifier(lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [{"label": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase )}, {"label": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
self.assertTrue(outputs[1]["label"] in model.config.idalabel.values() )
# Forcing to get all results with `top_k=None`
# This is NOT the legacy format
UpperCAmelCase_ = text_classifier(lowerCAmelCase , top_k=lowerCAmelCase )
UpperCAmelCase_ = len(model.config.idalabel.values() )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [[{"label": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase )}] * N, [{"label": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase )}] * N] , )
UpperCAmelCase_ = {"text": "HuggingFace is in ", "text_pair": "Paris is in France"}
UpperCAmelCase_ = text_classifier(lowerCAmelCase )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , {"label": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase )} , )
self.assertTrue(outputs["label"] in model.config.idalabel.values() )
# This might be used a text pair, but tokenizer + pipe interaction
# makes it hard to understand that it's not using the pair properly
# https://github.com/huggingface/transformers/issues/17305
# We disabled this usage instead as it was outputting wrong outputs.
UpperCAmelCase_ = [["HuggingFace is in ", "Paris is in France"]]
with self.assertRaises(lowerCAmelCase ):
text_classifier(lowerCAmelCase )
# This used to be valid for doing text pairs
# We're keeping it working because of backward compatibility
UpperCAmelCase_ = text_classifier([[["HuggingFace is in ", "Paris is in France"]]] )
self.assertEqual(
nested_simplify(lowerCAmelCase ) , [{"label": ANY(lowerCAmelCase ), "score": ANY(lowerCAmelCase )}] , )
self.assertTrue(outputs[0]["label"] in model.config.idalabel.values() )
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
)
@flax.struct.dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : jnp.ndarray
lowerCAmelCase_ : jnp.ndarray
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
lowerCAmelCase_ : int
lowerCAmelCase_ : Tuple[int] = (16, 32, 96, 256)
lowerCAmelCase_ : jnp.dtype = jnp.floataa
def A__ ( self ):
UpperCAmelCase_ = nn.Conv(
self.block_out_channels[0] , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
UpperCAmelCase_ = []
for i in range(len(self.block_out_channels ) - 1 ):
UpperCAmelCase_ = self.block_out_channels[i]
UpperCAmelCase_ = self.block_out_channels[i + 1]
UpperCAmelCase_ = nn.Conv(
lowerCAmelCase , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCAmelCase )
UpperCAmelCase_ = nn.Conv(
lowerCAmelCase , kernel_size=(3, 3) , strides=(2, 2) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
blocks.append(lowerCAmelCase )
UpperCAmelCase_ = blocks
UpperCAmelCase_ = nn.Conv(
self.conditioning_embedding_channels , kernel_size=(3, 3) , padding=((1, 1), (1, 1)) , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , lowerCAmelCase ):
UpperCAmelCase_ = self.conv_in(lowerCAmelCase )
UpperCAmelCase_ = nn.silu(lowerCAmelCase )
for block in self.blocks:
UpperCAmelCase_ = block(lowerCAmelCase )
UpperCAmelCase_ = nn.silu(lowerCAmelCase )
UpperCAmelCase_ = self.conv_out(lowerCAmelCase )
return embedding
@flax_register_to_config
class lowerCamelCase ( nn.Module, lowercase__, lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : int = 32
lowerCAmelCase_ : int = 4
lowerCAmelCase_ : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
lowerCAmelCase_ : Union[bool, Tuple[bool]] = False
lowerCAmelCase_ : Tuple[int] = (320, 640, 1280, 1280)
lowerCAmelCase_ : int = 2
lowerCAmelCase_ : Union[int, Tuple[int]] = 8
lowerCAmelCase_ : Optional[Union[int, Tuple[int]]] = None
lowerCAmelCase_ : int = 1280
lowerCAmelCase_ : float = 0.0
lowerCAmelCase_ : bool = False
lowerCAmelCase_ : jnp.dtype = jnp.floataa
lowerCAmelCase_ : bool = True
lowerCAmelCase_ : int = 0
lowerCAmelCase_ : str = "rgb"
lowerCAmelCase_ : Tuple[int] = (16, 32, 96, 256)
def A__ ( self , lowerCAmelCase ):
# init input tensors
UpperCAmelCase_ = (1, self.in_channels, self.sample_size, self.sample_size)
UpperCAmelCase_ = jnp.zeros(lowerCAmelCase , dtype=jnp.floataa )
UpperCAmelCase_ = jnp.ones((1,) , dtype=jnp.intaa )
UpperCAmelCase_ = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
UpperCAmelCase_ = (1, 3, self.sample_size * 8, self.sample_size * 8)
UpperCAmelCase_ = jnp.zeros(lowerCAmelCase , dtype=jnp.floataa )
UpperCAmelCase_ , UpperCAmelCase_ = jax.random.split(lowerCAmelCase )
UpperCAmelCase_ = {"params": params_rng, "dropout": dropout_rng}
return self.init(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )["params"]
def A__ ( self ):
UpperCAmelCase_ = self.block_out_channels
UpperCAmelCase_ = block_out_channels[0] * 4
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
UpperCAmelCase_ = self.num_attention_heads or self.attention_head_dim
# input
UpperCAmelCase_ = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
UpperCAmelCase_ = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
UpperCAmelCase_ = FlaxTimestepEmbedding(lowerCAmelCase , dtype=self.dtype )
UpperCAmelCase_ = FlaxControlNetConditioningEmbedding(
conditioning_embedding_channels=block_out_channels[0] , block_out_channels=self.conditioning_embedding_out_channels , )
UpperCAmelCase_ = self.only_cross_attention
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = (only_cross_attention,) * len(self.down_block_types )
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = (num_attention_heads,) * len(self.down_block_types )
# down
UpperCAmelCase_ = []
UpperCAmelCase_ = []
UpperCAmelCase_ = block_out_channels[0]
UpperCAmelCase_ = nn.Conv(
lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCAmelCase )
for i, down_block_type in enumerate(self.down_block_types ):
UpperCAmelCase_ = output_channel
UpperCAmelCase_ = block_out_channels[i]
UpperCAmelCase_ = i == len(lowerCAmelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
UpperCAmelCase_ = FlaxCrossAttnDownBlockaD(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , dtype=self.dtype , )
else:
UpperCAmelCase_ = FlaxDownBlockaD(
in_channels=lowerCAmelCase , out_channels=lowerCAmelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(lowerCAmelCase )
for _ in range(self.layers_per_block ):
UpperCAmelCase_ = nn.Conv(
lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCAmelCase )
if not is_final_block:
UpperCAmelCase_ = nn.Conv(
lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
controlnet_down_blocks.append(lowerCAmelCase )
UpperCAmelCase_ = down_blocks
UpperCAmelCase_ = controlnet_down_blocks
# mid
UpperCAmelCase_ = block_out_channels[-1]
UpperCAmelCase_ = FlaxUNetMidBlockaDCrossAttn(
in_channels=lowerCAmelCase , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , dtype=self.dtype , )
UpperCAmelCase_ = nn.Conv(
lowerCAmelCase , kernel_size=(1, 1) , padding="VALID" , kernel_init=nn.initializers.zeros_init() , bias_init=nn.initializers.zeros_init() , dtype=self.dtype , )
def __call__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 1.0 , lowerCAmelCase = True , lowerCAmelCase = False , ):
UpperCAmelCase_ = self.controlnet_conditioning_channel_order
if channel_order == "bgr":
UpperCAmelCase_ = jnp.flip(lowerCAmelCase , axis=1 )
# 1. time
if not isinstance(lowerCAmelCase , jnp.ndarray ):
UpperCAmelCase_ = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(lowerCAmelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps.astype(dtype=jnp.floataa )
UpperCAmelCase_ = jnp.expand_dims(lowerCAmelCase , 0 )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
# 2. pre-process
UpperCAmelCase_ = jnp.transpose(lowerCAmelCase , (0, 2, 3, 1) )
UpperCAmelCase_ = self.conv_in(lowerCAmelCase )
UpperCAmelCase_ = jnp.transpose(lowerCAmelCase , (0, 2, 3, 1) )
UpperCAmelCase_ = self.controlnet_cond_embedding(lowerCAmelCase )
sample += controlnet_cond
# 3. down
UpperCAmelCase_ = (sample,)
for down_block in self.down_blocks:
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ , UpperCAmelCase_ = down_block(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , deterministic=not train )
else:
UpperCAmelCase_ , UpperCAmelCase_ = down_block(lowerCAmelCase , lowerCAmelCase , deterministic=not train )
down_block_res_samples += res_samples
# 4. mid
UpperCAmelCase_ = self.mid_block(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , deterministic=not train )
# 5. contronet blocks
UpperCAmelCase_ = ()
for down_block_res_sample, controlnet_block in zip(lowerCAmelCase , self.controlnet_down_blocks ):
UpperCAmelCase_ = controlnet_block(lowerCAmelCase )
controlnet_down_block_res_samples += (down_block_res_sample,)
UpperCAmelCase_ = controlnet_down_block_res_samples
UpperCAmelCase_ = self.controlnet_mid_block(lowerCAmelCase )
# 6. scaling
UpperCAmelCase_ = [sample * conditioning_scale for sample in down_block_res_samples]
mid_block_res_sample *= conditioning_scale
if not return_dict:
return (down_block_res_samples, mid_block_res_sample)
return FlaxControlNetOutput(
down_block_res_samples=lowerCAmelCase , mid_block_res_sample=lowerCAmelCase )
| 23 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : List[str] = RetriBertTokenizer
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 1 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=224 , lowerCAmelCase=1000 , lowerCAmelCase=[3, 3, 6, 4] , lowerCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = layer_depths
UpperCAmelCase_ = embed_dims
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1e-5 , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ):
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : int = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Optional[Any] = False
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
def _config_zero_init(lowerCAmelCase ):
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1e-1_0 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
UpperCAmelCase_ = _config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 23 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : int = ['input_ids', 'attention_mask']
lowerCAmelCase_ : str = DistilBertTokenizer
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 1 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def snake_case__ ( __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE=1026 , __SCREAMING_SNAKE_CASE=True , __SCREAMING_SNAKE_CASE="data/tokenized_stories_train_wikitext103.jbl" , __SCREAMING_SNAKE_CASE="igf_context_pairs.jbl" , ) -> Optional[int]:
set_seed(3 )
# generate train_data and objective_set
UpperCAmelCase_ , UpperCAmelCase_ = generate_datasets(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , number=__SCREAMING_SNAKE_CASE , min_len=1026 , trim=__SCREAMING_SNAKE_CASE )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
UpperCAmelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
# load pretrained model
UpperCAmelCase_ = load_gpta("gpt2" ).to(__SCREAMING_SNAKE_CASE )
print("computing perplexity on objective set" )
UpperCAmelCase_ = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ).item()
print("perplexity on objective set:" , __SCREAMING_SNAKE_CASE )
# collect igf pairs and save to file demo.jbl
collect_objective_set(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=15 , __SCREAMING_SNAKE_CASE=128 , __SCREAMING_SNAKE_CASE=100 , __SCREAMING_SNAKE_CASE="igf_model.pt" , ) -> Tuple:
set_seed(42 )
# Load pre-trained model
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained("gpt2" )
# Initialize secondary learner to use embedding weights of model
UpperCAmelCase_ = SecondaryLearner(__SCREAMING_SNAKE_CASE )
# Train secondary learner
UpperCAmelCase_ = train_secondary_learner(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , max_epochs=__SCREAMING_SNAKE_CASE , batch_size=__SCREAMING_SNAKE_CASE , eval_freq=100 , igf_model_path=__SCREAMING_SNAKE_CASE , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=32 , __SCREAMING_SNAKE_CASE=1000 , __SCREAMING_SNAKE_CASE=16 , __SCREAMING_SNAKE_CASE=1.0 , __SCREAMING_SNAKE_CASE=recopy_gpta , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=10 , __SCREAMING_SNAKE_CASE="gpt2_finetuned.pt" , ) -> Tuple:
UpperCAmelCase_ = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
UpperCAmelCase_ = RandomSampler(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = DataLoader(__SCREAMING_SNAKE_CASE , sampler=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = max_steps // (len(__SCREAMING_SNAKE_CASE )) + 1
UpperCAmelCase_ = 0
UpperCAmelCase_ = torch.zeros((1, context_len) , dtype=torch.long , device=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = recopy_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
model.train()
if secondary_learner is not None:
secondary_learner.to(__SCREAMING_SNAKE_CASE )
secondary_learner.eval()
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
UpperCAmelCase_ = []
# Compute the performance of the transformer model at the beginning
UpperCAmelCase_ = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("Test perplexity, step" , __SCREAMING_SNAKE_CASE , ":" , __SCREAMING_SNAKE_CASE )
for epoch in range(int(__SCREAMING_SNAKE_CASE ) ):
for step, example in enumerate(__SCREAMING_SNAKE_CASE ):
torch.cuda.empty_cache()
UpperCAmelCase_ = random.randint(0 , example.size(2 ) - context_len - 1 )
UpperCAmelCase_ = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
UpperCAmelCase_ = model(__SCREAMING_SNAKE_CASE , labels=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = True
if secondary_learner is not None:
UpperCAmelCase_ = secondary_learner.forward(
torch.tensor(__SCREAMING_SNAKE_CASE , dtype=torch.long , device=__SCREAMING_SNAKE_CASE ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(__SCREAMING_SNAKE_CASE ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 10:
UpperCAmelCase_ = -1
if predicted_q < threshold:
UpperCAmelCase_ = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
UpperCAmelCase_ = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
UpperCAmelCase_ = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
UpperCAmelCase_ = compute_perplexity(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
test_perps.append(__SCREAMING_SNAKE_CASE )
print("Test perplexity, step" , __SCREAMING_SNAKE_CASE , ":" , __SCREAMING_SNAKE_CASE )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 60:
break
if max_steps > 0 and global_step > 60:
break
# save finetuned transformer model
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def snake_case__ ( ) -> Dict:
UpperCAmelCase_ = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
# Required parameters
parser.add_argument(
"--data_dir" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="The input data dir. Should contain data files for WikiText." , )
parser.add_argument(
"--model_name_or_path" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="Path to pretrained model or model identifier from huggingface.co/models" , )
parser.add_argument(
"--data_file" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help=(
"A jbl file containing tokenized data which can be split as objective dataset, "
"train_dataset and test_dataset."
) , )
parser.add_argument(
"--igf_data_file" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
parser.add_argument(
"--output_dir" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , required=__SCREAMING_SNAKE_CASE , help="The output directory where the final fine-tuned model is stored." , )
parser.add_argument(
"--tokenizer_name" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="Pretrained tokenizer name or path if not the same as model_name" , )
parser.add_argument("--seed" , type=__SCREAMING_SNAKE_CASE , default=__SCREAMING_SNAKE_CASE , help="A seed for reproducible training." )
parser.add_argument(
"--context_len" , default=32 , type=__SCREAMING_SNAKE_CASE , help=(
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
) , )
parser.add_argument(
"--size_objective_set" , default=100 , type=__SCREAMING_SNAKE_CASE , help="number of articles that are long enough to be used as our objective set" , )
parser.add_argument(
"--eval_freq" , default=100 , type=__SCREAMING_SNAKE_CASE , help="secondary model evaluation is triggered at eval_freq" )
parser.add_argument("--max_steps" , default=1000 , type=__SCREAMING_SNAKE_CASE , help="To calculate training epochs" )
parser.add_argument(
"--secondary_learner_batch_size" , default=128 , type=__SCREAMING_SNAKE_CASE , help="batch size of training data for secondary learner" , )
parser.add_argument(
"--batch_size" , default=16 , type=__SCREAMING_SNAKE_CASE , help="batch size of training data of language model(gpt2) " )
parser.add_argument(
"--eval_interval" , default=10 , type=__SCREAMING_SNAKE_CASE , help=(
"decay the selectivity of our secondary learner filter from"
"1 standard deviation above average to 1 below average after 10 batches"
) , )
parser.add_argument(
"--number" , default=100 , type=__SCREAMING_SNAKE_CASE , help="The number of examples split to be used as objective_set/test_data" )
parser.add_argument(
"--min_len" , default=1026 , type=__SCREAMING_SNAKE_CASE , help="The minimum length of the article to be used as objective set" )
parser.add_argument(
"--secondary_learner_max_epochs" , default=15 , type=__SCREAMING_SNAKE_CASE , help="number of epochs to train secondary learner" )
parser.add_argument("--trim" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="truncate the example if it exceeds context length" )
parser.add_argument(
"--threshold" , default=1.0 , type=__SCREAMING_SNAKE_CASE , help=(
"The threshold value used by secondary learner to filter the train_data and allow only"
" informative data as input to the model"
) , )
parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=__SCREAMING_SNAKE_CASE , help="finetuned_model_name" )
parser.add_argument(
"--recopy_model" , default=__SCREAMING_SNAKE_CASE , type=__SCREAMING_SNAKE_CASE , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=__SCREAMING_SNAKE_CASE , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
# Load train data for secondary learner
UpperCAmelCase_ = joblib.load("data/IGF_values.jbl" )
# Train secondary learner
UpperCAmelCase_ = training_secondary_learner(
__SCREAMING_SNAKE_CASE , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
# load pretrained gpt2 model
UpperCAmelCase_ = GPTaLMHeadModel.from_pretrained("gpt2" )
set_seed(42 )
# Generate train and test data to train and evaluate gpt2 model
UpperCAmelCase_ , UpperCAmelCase_ = generate_datasets(
context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=__SCREAMING_SNAKE_CASE )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=__SCREAMING_SNAKE_CASE , secondary_learner=__SCREAMING_SNAKE_CASE , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
| 23 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.array:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
UpperCAmelCase_ = "f32le"
UpperCAmelCase_ = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase_ = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
UpperCAmelCase_ = output_stream[0]
UpperCAmelCase_ = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "f32le" , ) -> Dict:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
if format_for_conversion == "s16le":
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCAmelCase_ = platform.system()
if system == "Linux":
UpperCAmelCase_ = "alsa"
UpperCAmelCase_ = "default"
elif system == "Darwin":
UpperCAmelCase_ = "avfoundation"
UpperCAmelCase_ = ":0"
elif system == "Windows":
UpperCAmelCase_ = "dshow"
UpperCAmelCase_ = "default"
UpperCAmelCase_ = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase_ = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "f32le" , ) -> int:
if stream_chunk_s is not None:
UpperCAmelCase_ = stream_chunk_s
else:
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
UpperCAmelCase_ = np.intaa
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = np.floataa
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCAmelCase_ = chunk_length_s / 6
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
UpperCAmelCase_ = [stride_length_s, stride_length_s]
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase_ = datetime.datetime.now()
UpperCAmelCase_ = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
UpperCAmelCase_ = np.frombuffer(item["raw"] , dtype=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
UpperCAmelCase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Dict:
UpperCAmelCase_ = B""
UpperCAmelCase_ , UpperCAmelCase_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCAmelCase_ = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
UpperCAmelCase_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
UpperCAmelCase_ = (_stride_left, stride_right)
UpperCAmelCase_ = {"raw": acc[:chunk_len], "stride": stride}
if stream:
UpperCAmelCase_ = False
yield item
UpperCAmelCase_ = stride_left
UpperCAmelCase_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
UpperCAmelCase_ = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
UpperCAmelCase_ = False
yield item
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ = 2**24 # 16Mo
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
UpperCAmelCase_ = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 23 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase ( lowercase__, lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Dict = StableUnCLIPImgaImgPipeline
lowerCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase_ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase_ : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase_ : Optional[int] = frozenset([] )
def A__ ( self ):
UpperCAmelCase_ = 32
UpperCAmelCase_ = embedder_hidden_size
# image encoding components
UpperCAmelCase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCAmelCase , projection_dim=lowerCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase_ = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase )
UpperCAmelCase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextModel(
CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase , layers_per_block=1 , upcast_attention=lowerCAmelCase , use_linear_projection=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL()
UpperCAmelCase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 , lowerCAmelCase=True ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if pil_image:
UpperCAmelCase_ = input_image * 0.5 + 0.5
UpperCAmelCase_ = input_image.clamp(0 , 1 )
UpperCAmelCase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ = DiffusionPipeline.numpy_to_pil(lowerCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
inputs.update({"image_embeds": None} )
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ):
UpperCAmelCase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCAmelCase )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , "anime turle" , generator=lowerCAmelCase , output_type="np" )
UpperCAmelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , "anime turle" , generator=lowerCAmelCase , output_type="np" )
UpperCAmelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
UpperCAmelCase_ = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = pipe(
lowerCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 23 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 768 , ):
super().__init__()
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.ones(1 , lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase = None , lowerCAmelCase = None , ):
UpperCAmelCase_ = nn.Parameter(self.mean.to(lowerCAmelCase ).to(lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(self.std.to(lowerCAmelCase ).to(lowerCAmelCase ) )
return self
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds * self.std) + self.mean
return embeds
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
UpperCAmelCase_ = str(bin(__SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
UpperCAmelCase_ = str(bin(__SCREAMING_SNAKE_CASE ) )[2:]
UpperCAmelCase_ = max(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(__SCREAMING_SNAKE_CASE ) , b_binary.zfill(__SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
if prompt is not None:
UpperCAmelCase_ = prompt
if generate_kwargs is not None:
UpperCAmelCase_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCAmelCase_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
UpperCAmelCase_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowerCAmelCase , **lowerCAmelCase ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = load_image(lowerCAmelCase )
if prompt is not None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
"Note also that one single text can be provided for conditional image to text generation." )
UpperCAmelCase_ = self.model.config.model_type
if model_type == "git":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(text=lowerCAmelCase , add_special_tokens=lowerCAmelCase ).input_ids
UpperCAmelCase_ = [self.tokenizer.cls_token_id] + input_ids
UpperCAmelCase_ = torch.tensor(lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , header_text=lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCAmelCase_ = None
return model_inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , lowerCAmelCase )
and all(x is None for x in model_inputs["input_ids"] )
):
UpperCAmelCase_ = None
if generate_kwargs is None:
UpperCAmelCase_ = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCAmelCase_ = model_inputs.pop(self.model.main_input_name )
UpperCAmelCase_ = self.model.generate(lowerCAmelCase , **lowerCAmelCase , **lowerCAmelCase )
return model_outputs
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
for output_ids in model_outputs:
UpperCAmelCase_ = {
"generated_text": self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , )
}
records.append(lowerCAmelCase )
return records
| 23 | 1 |
from __future__ import annotations
from sys import maxsize
from typing import Generic, TypeVar
SCREAMING_SNAKE_CASE = TypeVar("T")
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
return (position - 1) // 2
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
return (2 * position) + 1
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
return (2 * position) + 2
class lowerCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
UpperCAmelCase_ = []
UpperCAmelCase_ = {}
UpperCAmelCase_ = 0
def __len__( self ):
return self.elements
def __repr__( self ):
return str(self.heap )
def A__ ( self ):
# Check if the priority queue is empty
return self.elements == 0
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
# Add an element with given priority to the queue
self.heap.append((elem, weight) )
UpperCAmelCase_ = self.elements
self.elements += 1
self._bubble_up(lowerCAmelCase )
def A__ ( self ):
# Remove and return the element with lowest weight (highest priority)
if self.elements > 1:
self._swap_nodes(0 , self.elements - 1 )
UpperCAmelCase_ , UpperCAmelCase_ = self.heap.pop()
del self.position_map[elem]
self.elements -= 1
if self.elements > 0:
UpperCAmelCase_ , UpperCAmelCase_ = self.heap[0]
self._bubble_down(lowerCAmelCase )
return elem
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
# Update the weight of the given key
UpperCAmelCase_ = self.position_map[elem]
UpperCAmelCase_ = (elem, weight)
if position > 0:
UpperCAmelCase_ = get_parent_position(lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._bubble_up(lowerCAmelCase )
else:
self._bubble_down(lowerCAmelCase )
else:
self._bubble_down(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
# Place a node at the proper position (upward movement) [to be used internally
# only]
UpperCAmelCase_ = self.position_map[elem]
if curr_pos == 0:
return None
UpperCAmelCase_ = get_parent_position(lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = self.heap[curr_pos]
UpperCAmelCase_ , UpperCAmelCase_ = self.heap[parent_position]
if parent_weight > weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase )
return self._bubble_up(lowerCAmelCase )
return None
def A__ ( self , lowerCAmelCase ):
# Place a node at the proper position (downward movement) [to be used
# internally only]
UpperCAmelCase_ = self.position_map[elem]
UpperCAmelCase_ , UpperCAmelCase_ = self.heap[curr_pos]
UpperCAmelCase_ = get_child_left_position(lowerCAmelCase )
UpperCAmelCase_ = get_child_right_position(lowerCAmelCase )
if child_left_position < self.elements and child_right_position < self.elements:
UpperCAmelCase_ , UpperCAmelCase_ = self.heap[child_left_position]
UpperCAmelCase_ , UpperCAmelCase_ = self.heap[child_right_position]
if child_right_weight < child_left_weight and child_right_weight < weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase )
return self._bubble_down(lowerCAmelCase )
if child_left_position < self.elements:
UpperCAmelCase_ , UpperCAmelCase_ = self.heap[child_left_position]
if child_left_weight < weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase )
return self._bubble_down(lowerCAmelCase )
else:
return None
if child_right_position < self.elements:
UpperCAmelCase_ , UpperCAmelCase_ = self.heap[child_right_position]
if child_right_weight < weight:
self._swap_nodes(lowerCAmelCase , lowerCAmelCase )
return self._bubble_down(lowerCAmelCase )
return None
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
# Swap the nodes at the given positions
UpperCAmelCase_ = self.heap[nodea_pos][0]
UpperCAmelCase_ = self.heap[nodea_pos][0]
UpperCAmelCase_ , UpperCAmelCase_ = (
self.heap[nodea_pos],
self.heap[nodea_pos],
)
UpperCAmelCase_ = nodea_pos
UpperCAmelCase_ = nodea_pos
class lowerCamelCase ( Generic[T] ):
'''simple docstring'''
def __init__( self ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = 0
def __repr__( self ):
return str(self.connections )
def __len__( self ):
return self.nodes
def A__ ( self , lowerCAmelCase ):
# Add a node in the graph if it is not in the graph
if node not in self.connections:
UpperCAmelCase_ = {}
self.nodes += 1
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
# Add an edge between 2 nodes in the graph
self.add_node(lowerCAmelCase )
self.add_node(lowerCAmelCase )
UpperCAmelCase_ = weight
UpperCAmelCase_ = weight
def snake_case__ ( __SCREAMING_SNAKE_CASE , ) -> tuple[dict[T, int], dict[T, T | None]]:
UpperCAmelCase_ = {node: maxsize for node in graph.connections}
UpperCAmelCase_ = {node: None for node in graph.connections}
UpperCAmelCase_ = MinPriorityQueue()
for node, weight in dist.items():
priority_queue.push(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if priority_queue.is_empty():
return dist, parent
# initialization
UpperCAmelCase_ = priority_queue.extract_min()
UpperCAmelCase_ = 0
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCAmelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__SCREAMING_SNAKE_CASE , dist[neighbour] )
UpperCAmelCase_ = node
# running prim's algorithm
while not priority_queue.is_empty():
UpperCAmelCase_ = priority_queue.extract_min()
for neighbour in graph.connections[node]:
if dist[neighbour] > dist[node] + graph.connections[node][neighbour]:
UpperCAmelCase_ = dist[node] + graph.connections[node][neighbour]
priority_queue.update_key(__SCREAMING_SNAKE_CASE , dist[neighbour] )
UpperCAmelCase_ = node
return dist, parent
| 23 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = TextToVideoSDPipeline
lowerCAmelCase_ : Dict = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase_ : Optional[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = TextToVideoSDPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase_ = "np"
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCAmelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A__ ( self ):
pass
def A__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 23 | 1 |
import unittest
from transformers import DonutProcessor
SCREAMING_SNAKE_CASE = "naver-clova-ix/donut-base"
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = DonutProcessor.from_pretrained(lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
UpperCAmelCase_ = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
UpperCAmelCase_ = self.processor.tokenajson(lowerCAmelCase )
self.assertDictEqual(lowerCAmelCase , lowerCAmelCase )
| 23 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = [1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 0, 0, 0
UpperCAmelCase_ = ugly_nums[ia] * 2
UpperCAmelCase_ = ugly_nums[ia] * 3
UpperCAmelCase_ = ugly_nums[ia] * 5
for _ in range(1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
ugly_nums.append(__SCREAMING_SNAKE_CASE )
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 23 | 1 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
if not isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
raise TypeError("only integers accepted as input" )
else:
UpperCAmelCase_ = str(abs(__SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ = [list(__SCREAMING_SNAKE_CASE ) for char in range(len(__SCREAMING_SNAKE_CASE ) )]
for index in range(len(__SCREAMING_SNAKE_CASE ) ):
num_transpositions[index].pop(__SCREAMING_SNAKE_CASE )
return max(
int("".join(list(__SCREAMING_SNAKE_CASE ) ) ) for transposition in num_transpositions )
if __name__ == "__main__":
__import__("doctest").testmod()
| 23 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=224 , lowerCAmelCase=1000 , lowerCAmelCase=[3, 3, 6, 4] , lowerCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = layer_depths
UpperCAmelCase_ = embed_dims
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1e-5 , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ):
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : int = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Optional[Any] = False
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
def _config_zero_init(lowerCAmelCase ):
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1e-1_0 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
UpperCAmelCase_ = _config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 23 | 1 |
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=12 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=32 , lowerCAmelCase=2 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=0.02 , lowerCAmelCase=0 , lowerCAmelCase=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = projection_dim
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = dropout
UpperCAmelCase_ = attention_dropout
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = bos_token_id
def A__ ( self ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
UpperCAmelCase_ = input_mask.numpy()
UpperCAmelCase_ , UpperCAmelCase_ = input_mask.shape
UpperCAmelCase_ = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCAmelCase ):
UpperCAmelCase_ = 1
UpperCAmelCase_ = 0
UpperCAmelCase_ = self.get_config()
return config, input_ids, tf.convert_to_tensor(lowerCAmelCase )
def A__ ( self ):
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = TFBlipTextModel(config=lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , training=lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase , training=lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def A__ ( self ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : str = (TFBlipTextModel,) if is_tf_available() else ()
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Optional[int] = False
lowerCAmelCase_ : List[Any] = False
def A__ ( self ):
UpperCAmelCase_ = BlipTextModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=37 )
def A__ ( self ):
self.config_tester.run_common_tests()
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
pass
def A__ ( self ):
pass
@unittest.skip(reason="Blip does not use inputs_embeds" )
def A__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def A__ ( self ):
pass
@unittest.skip(reason="BlipTextModel has no base class and is not available in MODEL_MAPPING" )
def A__ ( self ):
pass
@slow
def A__ ( self ):
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = TFBlipTextModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def A__ ( self , lowerCAmelCase=True ):
super().test_pt_tf_model_equivalence(allow_missing_keys=lowerCAmelCase )
| 23 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
import re
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list:
return [char.split() for char in re.split(R"[^ a-z A-Z 0-9 \s]" , str_ )]
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = split_input(str_ )
return "".join(
["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
try:
UpperCAmelCase_ = split_input(__SCREAMING_SNAKE_CASE )
if upper:
UpperCAmelCase_ = "".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
UpperCAmelCase_ = "".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
return to_simple_case(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
try:
UpperCAmelCase_ = to_simple_case(__SCREAMING_SNAKE_CASE )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
return to_complex_case(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , "_" )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
return to_complex_case(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , "-" )
if __name__ == "__main__":
__import__("doctest").testmod()
| 23 |
import math
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]:
UpperCAmelCase_ = []
UpperCAmelCase_ = 2
UpperCAmelCase_ = int(math.sqrt(__SCREAMING_SNAKE_CASE ) ) # Size of every segment
UpperCAmelCase_ = [True] * (end + 1)
UpperCAmelCase_ = []
while start <= end:
if temp[start] is True:
in_prime.append(__SCREAMING_SNAKE_CASE )
for i in range(start * start , end + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
start += 1
prime += in_prime
UpperCAmelCase_ = end + 1
UpperCAmelCase_ = min(2 * end , __SCREAMING_SNAKE_CASE )
while low <= n:
UpperCAmelCase_ = [True] * (high - low + 1)
for each in in_prime:
UpperCAmelCase_ = math.floor(low / each ) * each
if t < low:
t += each
for j in range(__SCREAMING_SNAKE_CASE , high + 1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = False
for j in range(len(__SCREAMING_SNAKE_CASE ) ):
if temp[j] is True:
prime.append(j + low )
UpperCAmelCase_ = high + 1
UpperCAmelCase_ = min(high + end , __SCREAMING_SNAKE_CASE )
return prime
print(sieve(10**6))
| 23 | 1 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
def snake_case__ ( __SCREAMING_SNAKE_CASE = 8 ) -> str:
UpperCAmelCase_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
# Password Generator = full boot with random_number, random_letters, and
# random_character FUNCTIONS
# Put your code here...
i -= len(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = i // 3
UpperCAmelCase_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
UpperCAmelCase_ = (
chars_incl
+ random(__SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
+ random(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
)
UpperCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
shuffle(__SCREAMING_SNAKE_CASE )
return "".join(__SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
pass # Put your code here...
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
pass # Put your code here...
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
pass # Put your code here...
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(__SCREAMING_SNAKE_CASE ) < min_length:
# Your Password must be at least 8 characters long
return False
UpperCAmelCase_ = any(char in ascii_uppercase for char in password )
UpperCAmelCase_ = any(char in ascii_lowercase for char in password )
UpperCAmelCase_ = any(char in digits for char in password )
UpperCAmelCase_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowerase
# numbers, and special characters
def snake_case__ ( ) -> Optional[Any]:
UpperCAmelCase_ = int(input("Please indicate the max length of your password: " ).strip() )
UpperCAmelCase_ = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(__SCREAMING_SNAKE_CASE ) )
print(
"Alternative Password generated:" , alternative_password_generator(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 23 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : torch.FloatTensor
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ):
UpperCAmelCase_ = {}
def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return processors
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
self.set_attn_processor(AttnProcessor() )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 23 | 1 |
import argparse
from collections import defaultdict
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
UpperCAmelCase_ = f'''{file}_{class_name}_{test_name}'''
done_test[_id] += 1
with open(__SCREAMING_SNAKE_CASE , "r" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = f'''class {class_name}('''
UpperCAmelCase_ = f'''{4 * ' '}def {test_name}('''
UpperCAmelCase_ = f'''{8 * ' '}{correct_line.split()[0]}'''
UpperCAmelCase_ = f'''{16 * ' '}{correct_line.split()[0]}'''
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = False
UpperCAmelCase_ = 0
UpperCAmelCase_ = 0
UpperCAmelCase_ = []
for line in lines:
if line.startswith(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = True
elif in_class and line.startswith(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = True
elif in_class and in_func and (line.startswith(__SCREAMING_SNAKE_CASE ) or line.startswith(__SCREAMING_SNAKE_CASE )):
UpperCAmelCase_ = len(line.split(correct_line.split()[0] )[0] )
count += 1
if count == done_test[_id]:
UpperCAmelCase_ = True
if in_class and in_func and in_line:
if ")" not in line:
continue
else:
UpperCAmelCase_ = True
if in_class and in_func and in_line and insert_line:
new_lines.append(f'''{spaces * ' '}{correct_line}''' )
UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = UpperCAmelCase_ = False
else:
new_lines.append(__SCREAMING_SNAKE_CASE )
with open(__SCREAMING_SNAKE_CASE , "w" ) as f:
for line in new_lines:
f.write(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Dict:
if fail is not None:
with open(__SCREAMING_SNAKE_CASE , "r" ) as f:
UpperCAmelCase_ = {l.strip() for l in f.readlines()}
else:
UpperCAmelCase_ = None
with open(__SCREAMING_SNAKE_CASE , "r" ) as f:
UpperCAmelCase_ = f.readlines()
UpperCAmelCase_ = defaultdict(__SCREAMING_SNAKE_CASE )
for line in correct_lines:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = line.split(";" )
if test_failures is None or "::".join([file, class_name, test_name] ) in test_failures:
overwrite_file(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--correct_filename", help="filename of tests with expected result")
parser.add_argument("--fail_filename", help="filename of test failures", type=str, default=None)
SCREAMING_SNAKE_CASE = parser.parse_args()
main(args.correct_filename, args.fail_filename)
| 23 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.