Dataset schema (one row per pair of code samples):

| column | type | values |
|---|---|---|
| code | string | lengths 81 to 54k |
| code_codestyle | int64 | 0 to 721 |
| style_context | string | lengths 91 to 41.9k |
| style_context_codestyle | int64 | 0 to 699 |
| label | int64 | 0 to 1 |
"""Text/audio processor class for MusicGen."""
from typing import List, Optional

import numpy as np

from ...processing_utils import ProcessorMixin
from ...utils import to_numpy


class MusicgenProcessor(ProcessorMixin):
    """Wraps an EnCodec feature extractor and a T5 tokenizer into a single processor."""

    feature_extractor_class = "EncodecFeatureExtractor"
    tokenizer_class = ("T5Tokenizer", "T5TokenizerFast")

    def __init__(self, feature_extractor, tokenizer):
        super().__init__(feature_extractor, tokenizer)
        self.current_processor = self.feature_extractor
        self._in_target_context_manager = False

    def get_decoder_prompt_ids(self, task=None, language=None, no_timestamps=True):
        return self.tokenizer.get_decoder_prompt_ids(task=task, language=language, no_timestamps=no_timestamps)

    def __call__(self, *args, **kwargs):
        # For backward compatibility
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        audio = kwargs.pop("audio", None)
        sampling_rate = kwargs.pop("sampling_rate", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            audio = args[0]
            args = args[1:]

        if audio is None and text is None:
            raise ValueError("You need to specify either an `audio` or `text` input to process.")

        if text is not None:
            inputs = self.tokenizer(text, **kwargs)
        if audio is not None:
            audio_inputs = self.feature_extractor(audio, *args, sampling_rate=sampling_rate, **kwargs)

        if audio is None:
            return inputs
        elif text is None:
            return audio_inputs
        else:
            inputs["input_values"] = audio_inputs["input_values"]
            if "padding_mask" in audio_inputs:
                inputs["padding_mask"] = audio_inputs["padding_mask"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        audio_values = kwargs.pop("audio", None)
        padding_mask = kwargs.pop("padding_mask", None)
        if len(args) > 0:
            audio_values = args[0]
            args = args[1:]
        if audio_values is not None:
            return self._decode_audio(audio_values, padding_mask=padding_mask)
        else:
            return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)

    def _decode_audio(self, audio_values, padding_mask: Optional = None) -> List[np.ndarray]:
        audio_values = to_numpy(audio_values)
        bsz, channels, seq_len = audio_values.shape

        if padding_mask is None:
            return list(audio_values)

        padding_mask = to_numpy(padding_mask)
        # match the sequence length of the padding mask to the generated audio arrays by padding with the **non-padding**
        # token (so that the generated audio values are **not** treated as padded tokens)
        difference = seq_len - padding_mask.shape[-1]
        padding_value = 1 - self.feature_extractor.padding_value
        padding_mask = np.pad(padding_mask, ((0, 0), (0, difference)), "constant", constant_values=padding_value)

        audio_values = audio_values.tolist()
        for i in range(bsz):
            sliced_audio = np.asarray(audio_values[i])[
                padding_mask[i][None, :] != self.feature_extractor.padding_value
            ]
            audio_values[i] = sliced_audio.reshape(channels, -1)
        return audio_values
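A minimal usage sketch for the processor above, not part of the original file; the `facebook/musicgen-small` checkpoint name and the shape of the generated audio are assumptions for illustration.

from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("facebook/musicgen-small")  # assumed checkpoint
inputs = processor(text=["80s pop track with punchy drums"], padding=True, return_tensors="pt")
# Given generated audio of shape (batch, channels, seq_len) and the feature extractor's
# padding mask, batch_decode trims the padding off each generated waveform:
# audios = processor.batch_decode(audio_values, padding_mask=padding_mask)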
[code_codestyle: 707]
from typing import List, Union

from ..utils import (
    add_end_docstrings,
    is_tf_available,
    is_torch_available,
    is_vision_available,
    logging,
    requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline

if is_vision_available():
    from PIL import Image

    from ..image_utils import load_image

if is_tf_available():
    from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING

if is_torch_available():
    import torch

    from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING

logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params["prompt"] = prompt
        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)
            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")
        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
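A hedged usage sketch for the pipeline above; the checkpoint name and image URL are assumptions for illustration, not part of the original file.

from transformers import pipeline

captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")  # assumed checkpoint
print(captioner("https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png"))
# [{'generated_text': '...'}]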
[style_context_codestyle: 23 | label: 0]
from transformers import BertTokenizer, EncoderDecoderModel, Seq2SeqTrainer, Seq2SeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available

if is_datasets_available():
    import datasets


class Seq2seqTrainerTester(TestCasePlus):
    @slow
    @require_torch
    def test_finetune_bert2bert(self):
        bert2bert = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny", "prajjwal1/bert-tiny")
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")

        bert2bert.config.vocab_size = bert2bert.config.encoder.vocab_size
        bert2bert.config.eos_token_id = tokenizer.sep_token_id
        bert2bert.config.decoder_start_token_id = tokenizer.cls_token_id
        bert2bert.config.max_length = 128

        train_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="train[:1%]")
        val_dataset = datasets.load_dataset("cnn_dailymail", "3.0.0", split="validation[:1%]")

        train_dataset = train_dataset.select(range(32))
        val_dataset = val_dataset.select(range(16))

        batch_size = 4

        def _map_to_encoder_decoder_inputs(batch):
            # Tokenizer will automatically set [BOS] <text> [EOS]
            inputs = tokenizer(batch["article"], padding="max_length", truncation=True, max_length=512)
            outputs = tokenizer(batch["highlights"], padding="max_length", truncation=True, max_length=128)
            batch["input_ids"] = inputs.input_ids
            batch["attention_mask"] = inputs.attention_mask

            batch["decoder_input_ids"] = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
                [-100 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
            ]
            batch["decoder_attention_mask"] = outputs.attention_mask

            assert all(len(x) == 512 for x in inputs.input_ids)
            assert all(len(x) == 128 for x in outputs.input_ids)

            return batch

        def _compute_metrics(pred):
            labels_ids = pred.label_ids
            pred_ids = pred.predictions

            # all unnecessary tokens are removed
            pred_str = tokenizer.batch_decode(pred_ids, skip_special_tokens=True)
            label_str = tokenizer.batch_decode(labels_ids, skip_special_tokens=True)
            accuracy = sum([int(pred_str[i] == label_str[i]) for i in range(len(pred_str))]) / len(pred_str)

            return {"accuracy": accuracy}

        # map train dataset
        train_dataset = train_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        train_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        # same for validation dataset
        val_dataset = val_dataset.map(
            _map_to_encoder_decoder_inputs, batched=True, batch_size=batch_size, remove_columns=["article", "highlights"],
        )
        val_dataset.set_format(
            type="torch", columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"],
        )

        output_dir = self.get_auto_remove_tmp_dir()

        training_args = Seq2SeqTrainingArguments(
            output_dir=output_dir, per_device_train_batch_size=batch_size, per_device_eval_batch_size=batch_size,
            predict_with_generate=True, evaluation_strategy="steps", do_train=True, do_eval=True,
            warmup_steps=0, eval_steps=2, logging_steps=2,
        )

        # instantiate trainer
        trainer = Seq2SeqTrainer(
            model=bert2bert, args=training_args, compute_metrics=_compute_metrics,
            train_dataset=train_dataset, eval_dataset=val_dataset, tokenizer=tokenizer,
        )

        # start training
        trainer.train()
[code_codestyle: 708]
import unittest

import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    DDIMScheduler,
    DPMSolverMultistepScheduler,
    TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism

from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin

enable_full_determinism()


@skip_mps
class TextToVideoSDPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = TextToVideoSDPipeline
    params = TEXT_TO_IMAGE_PARAMS
    batch_params = TEXT_TO_IMAGE_BATCH_PARAMS
    # No `output_type`.
    required_optional_params = frozenset(
        [
            "num_inference_steps",
            "generator",
            "latents",
            "return_dict",
            "callback",
            "callback_steps",
        ]
    )

    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4,
            down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D"),
            up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D"),
            cross_attention_dim=32, attention_head_dim=4,
        )
        scheduler = DDIMScheduler(
            beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear", clip_sample=False, set_alpha_to_one=False,
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3,
            down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"],
            up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05,
            num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000,
            hidden_act="gelu", projection_dim=512,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip")

        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
        }
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 6.0,
            "output_type": "pt",
        }
        return inputs

    def test_text_to_video_default_case(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = TextToVideoSDPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        inputs["output_type"] = "np"
        frames = sd_pipe(**inputs).frames
        image_slice = frames[0][-3:, -3:, -1]

        assert frames[0].shape == (64, 64, 3)
        expected_slice = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0])

        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2

    def test_attention_slicing_forward_pass(self):
        self._test_attention_slicing_forward_pass(test_mean_pixel_difference=False, expected_max_diff=3e-3)

    @unittest.skipIf(
        torch_device != "cuda" or not is_xformers_available(),
        reason="XFormers attention is only available with CUDA and `xformers` installed",
    )
    def test_xformers_attention_forwardGenerator_pass(self):
        self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=False, expected_max_diff=1e-2)

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_consistent(self):
        pass

    @unittest.skip(reason="Batching needs to be properly figured out first for this pipeline.")
    def test_inference_batch_single_identical(self):
        pass

    @unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline.")
    def test_num_images_per_prompt(self):
        pass

    def test_progress_bar(self):
        return super().test_progress_bar()


@slow
@skip_mps
class TextToVideoSDPipelineSlowTests(unittest.TestCase):
    def test_full_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=25, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2

    def test_two_step_model(self):
        expected_video = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy"
        )
        pipe = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b")
        pipe = pipe.to("cuda")

        prompt = "Spiderman is surfing"
        generator = torch.Generator(device="cpu").manual_seed(0)

        video_frames = pipe(prompt, generator=generator, num_inference_steps=2, output_type="pt").frames
        video = video_frames.cpu().numpy()

        assert np.abs(expected_video - video).mean() < 5e-2
[style_context_codestyle: 23 | label: 0]
def solution(max_base: int = 10, max_power: int = 22) -> int:
    """
    Project Euler 63: count all n-digit positive integers that are also an nth power.

    >>> solution(10, 22)
    49
    """
    bases = range(1, max_base)
    powers = range(1, max_power)
    return sum(1 for power in powers for base in bases if len(str(base**power)) == power)


if __name__ == "__main__":
    print(f"{solution(10, 22) = }")
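An illustrative sanity check of the counting rule, added for clarity and not part of the original file:

assert len(str(7**5)) == 5   # 16807 is a five-digit fifth power, so it is counted
assert len(str(10**2)) != 2  # powers of 10 always overshoot, hence bases stop at 9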
[code_codestyle: 709]
def ugly_numbers(n: int) -> int:
    """Return the nth "ugly number": a positive integer whose only prime factors are 2, 3 and 5."""
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]


if __name__ == "__main__":
    from doctest import testmod

    testmod(verbose=True)
    print(f"{ugly_numbers(200) = }")
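An illustrative brute-force cross-check of the three-pointer merge above, added for clarity and not part of the original file:

def is_ugly(k: int) -> bool:
    # strip out all factors of 2, 3 and 5; what remains must be 1
    for p in (2, 3, 5):
        while k % p == 0:
            k //= p
    return k == 1

assert [n for n in range(1, 13) if is_ugly(n)] == [ugly_numbers(i) for i in range(1, 11)]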
[style_context_codestyle: 23 | label: 0]
from collections import OrderedDict
from typing import Mapping

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

XLM_ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
    "xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
    "xlm-roberta-large-finetuned-conll02-dutch": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll02-spanish": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-english": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
    ),
    "xlm-roberta-large-finetuned-conll03-german": (
        "https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
    ),
}


class XLMRobertaConfig(PretrainedConfig):
    model_type = "xlm-roberta"

    def __init__(
        self,
        vocab_size=30522,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout


class XLMRobertaOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
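A small sketch of how this config drives model construction, added for illustration; the tiny hyperparameters are arbitrary and the model is randomly initialized, so no download is needed.

from transformers import XLMRobertaConfig, XLMRobertaModel

config = XLMRobertaConfig(num_hidden_layers=2, hidden_size=64, num_attention_heads=2, intermediate_size=128)
model = XLMRobertaModel(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the tiny model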
[code_codestyle: 710]
import copy
import inspect
import unittest

from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
    require_torch,
    require_vision,
    slow,
    torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin

if is_torch_available():
    import torch
    from torch import nn

    from transformers import SwiftFormerForImageClassification, SwiftFormerModel
    from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST

if is_vision_available():
    from PIL import Image

    from transformers import ViTImageProcessor


class SwiftFormerModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        image_size=224,
        num_labels=1000,
        layer_depths=[3, 3, 6, 4],
        embed_dims=[48, 56, 112, 220],
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.num_labels = num_labels
        self.image_size = image_size
        self.layer_depths = layer_depths
        self.embed_dims = embed_dims

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)

        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return SwiftFormerConfig(
            depths=self.layer_depths, embed_dims=self.embed_dims, mlp_ratio=4,
            downsamples=[True, True, True, True], hidden_act="gelu", num_labels=self.num_labels,
            down_patch_size=3, down_stride=2, down_pad=1, drop_rate=0.0, drop_path_rate=0.0,
            use_layer_scale=True, layer_scale_init_value=1e-5,
        )

    def create_and_check_model(self, config, pixel_values, labels):
        model = SwiftFormerModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dims[-1], 7, 7))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

        model = SwiftFormerForImageClassification(config)
        model.to(torch_device)
        model.eval()
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        (config, pixel_values, labels) = self.prepare_config_and_inputs()
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict


@require_torch
class SwiftFormerModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": SwiftFormerModel, "image-classification": SwiftFormerForImageClassification}
        if is_torch_available()
        else {}
    )

    fx_compatible = False
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False

    def setUp(self):
        self.model_tester = SwiftFormerModelTester(self)
        self.config_tester = ConfigTester(
            self, config_class=SwiftFormerConfig, has_text_modality=False,
            hidden_size=37, num_attention_heads=12, num_hidden_layers=12,
        )

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="SwiftFormer does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    @slow
    def test_model_from_pretrained(self):
        for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = SwiftFormerModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    @unittest.skip(reason="SwiftFormer does not output attentions")
    def test_attention_outputs(self):
        pass

    def test_hidden_states_output(self):
        def check_hidden_states_output(inputs_dict, config, model_class):
            model = model_class(config)
            model.to(torch_device)
            model.eval()

            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict, model_class))

            hidden_states = outputs.hidden_states

            expected_num_stages = 8
            self.assertEqual(len(hidden_states), expected_num_stages)  # TODO

            # SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
            # with the width and height being successively divided by 2, after every 2 blocks
            for i in range(len(hidden_states)):
                self.assertEqual(
                    hidden_states[i].shape,
                    torch.Size(
                        [
                            self.model_tester.batch_size,
                            self.model_tester.embed_dims[i // 2],
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                            (self.model_tester.image_size // 4) // 2 ** (i // 2),
                        ]
                    ),
                )

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            inputs_dict["output_hidden_states"] = True
            check_hidden_states_output(inputs_dict, config, model_class)

            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True

            check_hidden_states_output(inputs_dict, config, model_class)

    def test_initialization(self):
        def _config_zero_init(config):
            configs_no_init = copy.deepcopy(config)
            for key in configs_no_init.__dict__.keys():
                if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
                    setattr(configs_no_init, key, 1e-10)
                if isinstance(getattr(configs_no_init, key, None), PretrainedConfig):
                    no_init_subconfig = _config_zero_init(getattr(configs_no_init, key))
                    setattr(configs_no_init, key, no_init_subconfig)
            return configs_no_init

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            for name, param in model.named_parameters():
                if param.requires_grad:
                    self.assertIn(
                        ((param.data.mean() * 1e9) / 1e9).round().item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass


def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
class SwiftFormerModelIntegrationTest(unittest.TestCase):
    @cached_property
    def default_image_processor(self):
        return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs") if is_vision_available() else None

    @slow
    def test_inference_image_classification_head(self):
        model = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs").to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)

        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1e-4))
[style_context_codestyle: 23 | label: 0]
from statistics import mean, stdev


def normalization(data: list, ndigits: int = 3) -> list:
    """Rescale data to the range [0, 1] (min-max normalization)."""
    x_min = min(data)
    x_max = max(data)
    # normalize data
    return [round((x - x_min) / (x_max - x_min), ndigits) for x in data]


def standardization(data: list, ndigits: int = 3) -> list:
    """Rescale data to zero mean and unit variance (z-score standardization)."""
    mu = mean(data)
    sigma = stdev(data)
    # standardize data
    return [round((x - mu) / sigma, ndigits) for x in data]
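Example outputs for the two transforms above on a small sample, added for illustration (values verified by hand):

print(normalization([2, 4, 6, 8, 10]))    # [0.0, 0.25, 0.5, 0.75, 1.0]
print(standardization([2, 4, 6, 8, 10]))  # [-1.265, -0.632, 0.0, 0.632, 1.265]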
[code_codestyle: 711]
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available

_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}

try:
    if not is_tokenizers_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]

if TYPE_CHECKING:
    from .tokenization_herbert import HerbertTokenizer

    try:
        if not is_tokenizers_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .tokenization_herbert_fast import HerbertTokenizerFast

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
[style_context_codestyle: 23 | label: 0]
import json
from typing import TYPE_CHECKING, List, Optional, Tuple

from tokenizers import pre_tokenizers

from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging

if TYPE_CHECKING:
    from transformers.pipelines.conversational import Conversation

logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
    "tokenizer_file": {
        "EleutherAI/gpt-neox-20b": "https://huggingface.co/EleutherAI/gpt-neox-20b/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "gpt-neox-20b": 2048,
}


class GPTNeoXTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        merges_file=None,
        tokenizer_file=None,
        unk_token="<|endoftext|>",
        bos_token="<|endoftext|>",
        eos_token="<|endoftext|>",
        add_prefix_space=False,
        **kwargs,
    ):
        super().__init__(
            vocab_file, merges_file, tokenizer_file=tokenizer_file, unk_token=unk_token,
            bos_token=bos_token, eos_token=eos_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__())
        if pre_tok_state.get("add_prefix_space", add_prefix_space) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers, pre_tok_state.pop("type"))
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state)

        self.add_prefix_space = add_prefix_space

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)

    def _build_conversation_input_ids(self, conversation: "Conversation") -> List[int]:
        """Builds the input ids for a conversation."""
        input_ids = []
        for is_user, text in conversation.iter_texts():
            input_ids.extend(self.encode(text, add_special_tokens=False) + [self.eos_token_id])
        if len(input_ids) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
        return input_ids
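A hedged usage sketch for the tokenizer above; loading requires network access to the real `EleutherAI/gpt-neox-20b` checkpoint.

from transformers import GPTNeoXTokenizerFast

tokenizer = GPTNeoXTokenizerFast.from_pretrained("EleutherAI/gpt-neox-20b")
print(tokenizer("hello world").input_ids)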
[code_codestyle: 712]
import math


def sieve(n: int) -> list[int]:
    """Segmented sieve of Eratosthenes: return all primes up to and including n."""
    prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []

    # find the primes in the first segment [2, sqrt(n)]
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime

    low = end + 1
    high = min(2 * end, n)

    # sieve the remaining range segment by segment, reusing the small primes
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each) * each
            if t < low:
                t += each

            for j in range(t, high + 1, each):
                temp[j - low] = False

        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)

        low = high + 1
        high = min(high + end, n)

    return prime


print(sieve(10**6))
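An illustrative cross-check of the segmented sieve against plain trial division, added for clarity and not part of the original file:

def is_prime(k: int) -> bool:
    return k > 1 and all(k % d for d in range(2, int(math.sqrt(k)) + 1))

assert sieve(100) == [k for k in range(2, 101) if is_prime(k)]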
[style_context_codestyle: 23 | label: 0]
import unittest

import numpy as np

from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
    expand_dims,
    flatten_dict,
    is_flax_available,
    is_tf_available,
    is_torch_available,
    reshape,
    squeeze,
    transpose,
)

if is_flax_available():
    import jax.numpy as jnp

if is_tf_available():
    import tensorflow as tf

if is_torch_available():
    import torch


class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)

    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))

    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))

    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))

    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
[code_codestyle: 713]
from dataclasses import dataclass
from typing import Dict, Optional, Union

import torch
import torch.nn.functional as F
from torch import nn

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin


@dataclass
class PriorTransformerOutput(BaseOutput):
    """Output of PriorTransformer: the predicted CLIP image embedding."""

    predicted_image_embedding: torch.FloatTensor


class PriorTransformer(ModelMixin, ConfigMixin):
    @register_to_config
    def __init__(
        self,
        num_attention_heads: int = 32,
        attention_head_dim: int = 64,
        num_layers: int = 20,
        embedding_dim: int = 768,
        num_embeddings=77,
        additional_embeddings=4,
        dropout: float = 0.0,
        time_embed_act_fn: str = "silu",
        norm_in_type: Optional[str] = None,
        embedding_proj_norm_type: Optional[str] = None,
        encoder_hid_proj_type: Optional[str] = "linear",
        added_emb_type: Optional[str] = "prd",
        time_embed_dim: Optional[int] = None,
        embedding_proj_dim: Optional[int] = None,
        clip_embed_dim: Optional[int] = None,
    ):
        super().__init__()
        self.num_attention_heads = num_attention_heads
        self.attention_head_dim = attention_head_dim
        inner_dim = num_attention_heads * attention_head_dim
        self.additional_embeddings = additional_embeddings

        time_embed_dim = time_embed_dim or inner_dim
        embedding_proj_dim = embedding_proj_dim or embedding_dim
        clip_embed_dim = clip_embed_dim or embedding_dim

        self.time_proj = Timesteps(inner_dim, True, 0)
        self.time_embedding = TimestepEmbedding(inner_dim, time_embed_dim, out_dim=inner_dim, act_fn=time_embed_act_fn)

        self.proj_in = nn.Linear(embedding_dim, inner_dim)

        if embedding_proj_norm_type is None:
            self.embedding_proj_norm = None
        elif embedding_proj_norm_type == "layer":
            self.embedding_proj_norm = nn.LayerNorm(embedding_dim)
        else:
            raise ValueError(f"unsupported embedding_proj_norm_type: {embedding_proj_norm_type}")

        self.embedding_proj = nn.Linear(embedding_proj_dim, inner_dim)

        if encoder_hid_proj_type is None:
            self.encoder_hidden_states_proj = None
        elif encoder_hid_proj_type == "linear":
            self.encoder_hidden_states_proj = nn.Linear(embedding_dim, inner_dim)
        else:
            raise ValueError(f"unsupported encoder_hid_proj_type: {encoder_hid_proj_type}")

        self.positional_embedding = nn.Parameter(torch.zeros(1, num_embeddings + additional_embeddings, inner_dim))

        if added_emb_type == "prd":
            self.prd_embedding = nn.Parameter(torch.zeros(1, 1, inner_dim))
        elif added_emb_type is None:
            self.prd_embedding = None
        else:
            raise ValueError(
                f"`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `'prd'` or `None`."
            )

        self.transformer_blocks = nn.ModuleList(
            [
                BasicTransformerBlock(
                    inner_dim, num_attention_heads, attention_head_dim,
                    dropout=dropout, activation_fn="gelu", attention_bias=True,
                )
                for d in range(num_layers)
            ]
        )

        if norm_in_type == "layer":
            self.norm_in = nn.LayerNorm(inner_dim)
        elif norm_in_type is None:
            self.norm_in = None
        else:
            raise ValueError(f"Unsupported norm_in_type: {norm_in_type}.")

        self.norm_out = nn.LayerNorm(inner_dim)

        self.proj_to_clip_embeddings = nn.Linear(inner_dim, clip_embed_dim)

        causal_attention_mask = torch.full(
            [num_embeddings + additional_embeddings, num_embeddings + additional_embeddings], -10000.0
        )
        causal_attention_mask.triu_(1)
        causal_attention_mask = causal_attention_mask[None, ...]
        self.register_buffer("causal_attention_mask", causal_attention_mask, persistent=False)

        self.clip_mean = nn.Parameter(torch.zeros(1, clip_embed_dim))
        self.clip_std = nn.Parameter(torch.zeros(1, clip_embed_dim))

    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())

    def forward(
        self,
        hidden_states,
        timestep: Union[torch.Tensor, float, int],
        proj_embedding: torch.FloatTensor,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        attention_mask: Optional[torch.BoolTensor] = None,
        return_dict: bool = True,
    ):
        batch_size = hidden_states.shape[0]
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            timesteps = torch.tensor([timesteps], dtype=torch.long, device=hidden_states.device)
        elif torch.is_tensor(timesteps) and len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(hidden_states.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps * torch.ones(batch_size, dtype=timesteps.dtype, device=timesteps.device)

        timesteps_projected = self.time_proj(timesteps)

        # timesteps does not contain any weights and will always return f32 tensors
        # but time_embedding might be fp16, so we need to cast here.
        timesteps_projected = timesteps_projected.to(dtype=self.dtype)
        time_embeddings = self.time_embedding(timesteps_projected)

        if self.embedding_proj_norm is not None:
            proj_embedding = self.embedding_proj_norm(proj_embedding)

        proj_embeddings = self.embedding_proj(proj_embedding)
        if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
            encoder_hidden_states = self.encoder_hidden_states_proj(encoder_hidden_states)
        elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
            raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set")

        hidden_states = self.proj_in(hidden_states)

        positional_embeddings = self.positional_embedding.to(hidden_states.dtype)

        additional_embeds = []
        additional_embeddings_len = 0

        if encoder_hidden_states is not None:
            additional_embeds.append(encoder_hidden_states)
            additional_embeddings_len += encoder_hidden_states.shape[1]

        if len(proj_embeddings.shape) == 2:
            proj_embeddings = proj_embeddings[:, None, :]

        if len(hidden_states.shape) == 2:
            hidden_states = hidden_states[:, None, :]

        additional_embeds = additional_embeds + [
            proj_embeddings,
            time_embeddings[:, None, :],
            hidden_states,
        ]

        if self.prd_embedding is not None:
            prd_embedding = self.prd_embedding.to(hidden_states.dtype).expand(batch_size, -1, -1)
            additional_embeds.append(prd_embedding)

        hidden_states = torch.cat(
            additional_embeds,
            dim=1,
        )

        # Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
        additional_embeddings_len = additional_embeddings_len + proj_embeddings.shape[1] + 1
        if positional_embeddings.shape[1] < hidden_states.shape[1]:
            positional_embeddings = F.pad(
                positional_embeddings,
                (
                    0,
                    0,
                    additional_embeddings_len,
                    self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
                ),
                value=0.0,
            )

        hidden_states = hidden_states + positional_embeddings

        if attention_mask is not None:
            attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
            attention_mask = F.pad(attention_mask, (0, self.additional_embeddings), value=0.0)
            attention_mask = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype)
            attention_mask = attention_mask.repeat_interleave(self.config.num_attention_heads, dim=0)

        if self.norm_in is not None:
            hidden_states = self.norm_in(hidden_states)

        for block in self.transformer_blocks:
            hidden_states = block(hidden_states, attention_mask=attention_mask)

        hidden_states = self.norm_out(hidden_states)

        if self.prd_embedding is not None:
            hidden_states = hidden_states[:, -1]
        else:
            hidden_states = hidden_states[:, additional_embeddings_len:]

        predicted_image_embedding = self.proj_to_clip_embeddings(hidden_states)

        if not return_dict:
            return (predicted_image_embedding,)

        return PriorTransformerOutput(predicted_image_embedding=predicted_image_embedding)

    def post_process_latents(self, prior_latents):
        prior_latents = (prior_latents * self.clip_std) + self.clip_mean
        return prior_latents
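A shape-check sketch for the model above, added for illustration; the import path and the tiny, randomly initialized configuration are assumptions, not part of the original file.

import torch
from diffusers.models.prior_transformer import PriorTransformer

model = PriorTransformer(num_attention_heads=2, attention_head_dim=4, num_layers=2, embedding_dim=8, num_embeddings=7, additional_embeddings=4)
latents = torch.randn(1, 8)  # hidden states being denoised
proj = torch.randn(1, 8)     # CLIP image embedding conditioning
text = torch.randn(1, 7, 8)  # CLIP text encoder hidden states
out = model(latents, timestep=1, proj_embedding=proj, encoder_hidden_states=text)
print(out.predicted_image_embedding.shape)  # torch.Size([1, 8])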
[style_context_codestyle: 23 | label: 0]
import unittest

from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available

from ...test_tokenization_common import TokenizerTesterMixin

SAMPLE_VOCAB = get_tests_dir("fixtures/test_sentencepiece.model")
SAMPLE_BPE_VOCAB = get_tests_dir("fixtures/test_sentencepiece_bpe.model")

FRAMEWORK = "pt" if is_torch_available() else "tf"


@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True

    def setUp(self):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)

    def test_convert_token_and_id(self):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)

    def test_get_vocab(self):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())

        self.assertEqual(vocab_keys[0], "<s>NOTUSED")
        self.assertEqual(vocab_keys[1], "<pad>")
        self.assertEqual(vocab_keys[-1], "<mask>")
        self.assertEqual(len(vocab_keys), 1004)

    def test_vocab_size(self):
        self.assertEqual(self.get_tokenizer().vocab_size, 1005)

    def test_rust_and_python_bpe_tokenizers(self):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB)
        tokenizer.save_pretrained(self.tmpdirname)
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname)

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

    def test_rust_and_python_full_tokenizers(self):
        if not self.test_rust_tokenizer:
            return

        tokenizer = self.get_tokenizer()
        rust_tokenizer = self.get_rust_tokenizer()

        sequence = "I was born in 92000, and this is falsé."

        tokens = tokenizer.tokenize(sequence)
        rust_tokens = rust_tokenizer.tokenize(sequence)
        self.assertListEqual(tokens, rust_tokens)

        ids = tokenizer.encode(sequence, add_special_tokens=False)
        rust_ids = rust_tokenizer.encode(sequence, add_special_tokens=False)
        self.assertListEqual(ids, rust_ids)

        rust_tokenizer = self.get_rust_tokenizer()
        ids = tokenizer.encode(sequence)
        rust_ids = rust_tokenizer.encode(sequence)
        self.assertListEqual(ids, rust_ids)

    @slow
    def test_tokenizer_integration(self):
        # fmt: off
        expected_encoding = {"input_ids": [[5, 54, 7196, 297, 30, 23, 776, 18, 11, 3215, 3705, 8252, 22, 3164, 1181, 2116, 29, 16, 813, 25, 791, 3314, 20, 3446, 38, 27575, 120, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 468, 17, 11, 9088, 20, 1517, 8, 22804, 18818, 10, 38, 629, 607, 607, 142, 19, 7196, 867, 56, 10326, 24, 2267, 20, 416, 5072, 15612, 233, 734, 7, 2399, 27, 16, 3015, 1649, 7, 24, 20, 4338, 2399, 27, 13, 3400, 14, 13, 6189, 8, 930, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]}  # noqa: E501
        # fmt: on

        # camembert is a french model. So we also use french texts.
        sequences = [
            "Le transformeur est un modèle d'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]

        self.tokenizer_integration_test_util(
            expected_encoding=expected_encoding,
            model_name="camembert-base",
            revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf",
            sequences=sequences,
        )
[code_codestyle: 714]
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
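# Hedged usage sketch (added): the _LazyModule above defers the heavy torch imports until a
# symbol is first accessed. A tiny config keeps the smoke test cheap; requires torch.
from transformers import GPTBigCodeConfig, GPTBigCodeForCausalLM

config = GPTBigCodeConfig(n_layer=2, n_head=4, n_embd=128)  # deliberately small toy model
model = GPTBigCodeForCausalLM(config)
print(sum(p.numel() for p in model.parameters()))  # parameter count of the toy model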
| 23 | 0 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 715 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = 'xlm-roberta'
def __init__( self , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
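# Hedged usage sketch (added): the two classes above correspond to transformers'
# XLMRobertaConfig and XLMRobertaOnnxConfig (upstream names assumed, since the definitions
# here are renamed). No weights are downloaded; this only inspects the declared ONNX axes.
from transformers import XLMRobertaConfig
from transformers.models.xlm_roberta.configuration_xlm_roberta import XLMRobertaOnnxConfig

cfg = XLMRobertaConfig()                      # defaults mirror the __init__ signature above
onnx_cfg = XLMRobertaOnnxConfig(cfg, task="default")
print(onnx_cfg.inputs)                        # OrderedDict with dynamic batch/sequence axes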
| 23 | 0 |
import argparse
import io
import requests
import torch
from omegaconf import OmegaConf
from diffusers import AutoencoderKL
from diffusers.pipelines.stable_diffusion.convert_from_ckpt import (
assign_to_checkpoint,
conv_attn_to_linear,
create_vae_diffusers_config,
renew_vae_attention_paths,
renew_vae_resnet_paths,
)
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ = checkpoint
UpperCAmelCase_ = {}
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["encoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_in.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.conv_out.bias"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.weight"]
UpperCAmelCase_ = vae_state_dict["decoder.norm_out.bias"]
UpperCAmelCase_ = vae_state_dict["quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["quant_conv.bias"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.weight"]
UpperCAmelCase_ = vae_state_dict["post_quant_conv.bias"]
# Retrieves the keys for the encoder down blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "encoder.down" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f'''down.{layer_id}''' in key] for layer_id in range(__SCREAMING_SNAKE_CASE )
}
# Retrieves the keys for the decoder up blocks only
UpperCAmelCase_ = len({".".join(layer.split("." )[:3] ) for layer in vae_state_dict if "decoder.up" in layer} )
UpperCAmelCase_ = {
layer_id: [key for key in vae_state_dict if f'''up.{layer_id}''' in key] for layer_id in range(__SCREAMING_SNAKE_CASE )
}
for i in range(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = [key for key in down_blocks[i] if f'''down.{i}''' in key and f'''down.{i}.downsample''' not in key]
if f'''encoder.down.{i}.downsample.conv.weight''' in vae_state_dict:
UpperCAmelCase_ = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.weight''' )
UpperCAmelCase_ = vae_state_dict.pop(
f'''encoder.down.{i}.downsample.conv.bias''' )
UpperCAmelCase_ = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": f'''down.{i}.block''', "new": f'''down_blocks.{i}.resnets'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f'''encoder.mid.block_{i}''' in key]
UpperCAmelCase_ = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": f'''mid.block_{i}''', "new": f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [key for key in vae_state_dict if "encoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
conv_attn_to_linear(__SCREAMING_SNAKE_CASE )
for i in range(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = num_up_blocks - 1 - i
UpperCAmelCase_ = [
key for key in up_blocks[block_id] if f'''up.{block_id}''' in key and f'''up.{block_id}.upsample''' not in key
]
if f'''decoder.up.{block_id}.upsample.conv.weight''' in vae_state_dict:
UpperCAmelCase_ = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.weight'''
]
UpperCAmelCase_ = vae_state_dict[
f'''decoder.up.{block_id}.upsample.conv.bias'''
]
UpperCAmelCase_ = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": f'''up.{block_id}.block''', "new": f'''up_blocks.{i}.resnets'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.block" in key]
UpperCAmelCase_ = 2
for i in range(1 , num_mid_res_blocks + 1 ):
UpperCAmelCase_ = [key for key in mid_resnets if f'''decoder.mid.block_{i}''' in key]
UpperCAmelCase_ = renew_vae_resnet_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": f'''mid.block_{i}''', "new": f'''mid_block.resnets.{i - 1}'''}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = [key for key in vae_state_dict if "decoder.mid.attn" in key]
UpperCAmelCase_ = renew_vae_attention_paths(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {"old": "mid.attn_1", "new": "mid_block.attentions.0"}
assign_to_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , additional_replacements=[meta_path] , config=__SCREAMING_SNAKE_CASE )
conv_attn_to_linear(__SCREAMING_SNAKE_CASE )
return new_checkpoint
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> List[str]:
# Only support V1
UpperCAmelCase_ = requests.get(
" https://raw.githubusercontent.com/CompVis/stable-diffusion/main/configs/stable-diffusion/v1-inference.yaml" )
UpperCAmelCase_ = io.BytesIO(r.content )
UpperCAmelCase_ = OmegaConf.load(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = 512
UpperCAmelCase_ = "cuda" if torch.cuda.is_available() else "cpu"
if checkpoint_path.endswith("safetensors" ):
from safetensors import safe_open
UpperCAmelCase_ = {}
with safe_open(__SCREAMING_SNAKE_CASE , framework="pt" , device="cpu" ) as f:
for key in f.keys():
UpperCAmelCase_ = f.get_tensor(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = torch.load(__SCREAMING_SNAKE_CASE , map_location=__SCREAMING_SNAKE_CASE )["state_dict"]
# Convert the VAE model.
UpperCAmelCase_ = create_vae_diffusers_config(__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = custom_convert_ldm_vae_checkpoint(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = AutoencoderKL(**__SCREAMING_SNAKE_CASE )
vae.load_state_dict(__SCREAMING_SNAKE_CASE )
vae.save_pretrained(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--vae_pt_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
parser.add_argument("--dump_path", default=None, type=str, required=True, help="Path to the VAE.pt to convert.")
SCREAMING_SNAKE_CASE = parser.parse_args()
vae_pt_to_vae_diffuser(args.vae_pt_path, args.dump_path)
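# Hedged usage example (added): a typical invocation of this conversion script; both paths
# are placeholders, and the script name is assumed from the upstream diffusers repository.
#   python convert_vae_pt_to_diffusers.py \
#       --vae_pt_path /path/to/vae.ckpt \
#       --dump_path ./converted-vae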
| 716 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = int(__SCREAMING_SNAKE_CASE )
if decimal in (0, 1): # Exit cases for the recursion
return str(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ = divmod(__SCREAMING_SNAKE_CASE , 2 )
return binary_recursive(__SCREAMING_SNAKE_CASE ) + str(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
UpperCAmelCase_ = str(__SCREAMING_SNAKE_CASE ).strip()
if not number:
raise ValueError("No input value was provided" )
UpperCAmelCase_ = "-" if number.startswith("-" ) else ""
UpperCAmelCase_ = number.lstrip("-" )
if not number.isnumeric():
raise ValueError("Input value is not an integer" )
return f'''{negative}0b{binary_recursive(int(__SCREAMING_SNAKE_CASE ) )}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
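# Hedged sketch (added): the same divide-by-two recursion with readable names, shown only
# for clarity; it mirrors the two functions above and agrees with Python's built-in bin().
def to_binary(decimal: int) -> str:
    if decimal in (0, 1):  # base case of the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return to_binary(div) + str(mod)

assert "0b" + to_binary(25) == bin(25)  # 25 -> "11001"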
| 23 | 0 |
from math import factorial
def snake_case__ ( __SCREAMING_SNAKE_CASE = 20 ) -> int:
    UpperCAmelCase_ = 2 * n  # middle entry of odd rows starting at row 3 is the solution for n = 1, 2, 3, ...
    UpperCAmelCase_ = n // 2
return int(factorial(__SCREAMING_SNAKE_CASE ) / (factorial(__SCREAMING_SNAKE_CASE ) * factorial(n - k )) )
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
print(solution(20))
else:
try:
SCREAMING_SNAKE_CASE = int(sys.argv[1])
print(solution(n))
except ValueError:
print("Invalid entry - please enter a number.")
| 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase ).to(lowerCAmelCase )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = model(input_ids.to(lowerCAmelCase ) , labels=labels.to(lowerCAmelCase ) ).loss
UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
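# Note (added): the score above converts mean cross-entropy back into a summed
# log-likelihood: the loss is averaged over the label length, so multiplying by
# labels.shape[-1] and negating recovers log p(labels | input), compared to -84.9127.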
| 23 | 0 |
from __future__ import annotations
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> dict[str, float]:
if (voltage, current, resistance).count(0 ) != 1:
raise ValueError("One and only one argument must be 0" )
if resistance < 0:
raise ValueError("Resistance cannot be negative" )
if voltage == 0:
return {"voltage": float(current * resistance )}
elif current == 0:
return {"current": voltage / resistance}
elif resistance == 0:
return {"resistance": voltage / current}
else:
raise ValueError("Exactly one argument must be 0" )
if __name__ == "__main__":
import doctest
doctest.testmod()
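# Hedged sketch (added): the same Ohm's-law dispatch with readable names; exactly one of
# the three arguments must be zero, and that quantity is computed from the other two.
def ohms_law(voltage: float, current: float, resistance: float) -> dict[str, float]:
    if (voltage, current, resistance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if resistance < 0:
        raise ValueError("Resistance cannot be negative")
    if voltage == 0:
        return {"voltage": float(current * resistance)}
    if current == 0:
        return {"current": voltage / resistance}
    return {"resistance": voltage / current}

assert ohms_law(voltage=10, current=5, resistance=0) == {"resistance": 2.0}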
| 718 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
UpperCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
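# Hedged worked check (added): the doubling loops above implement Russian-peasant
# multiplication; shown here with readable names and compared against the * operator.
def peasant_multiply(a: int, b: int) -> int:
    res = 0
    while b > 0:
        if b & 1:        # add the current power-of-two multiple when the bit is set
            res += a
        a += a           # double a
        b >>= 1          # halve b
    return res

assert peasant_multiply(13, 11) == 13 * 11  # 143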
| 23 | 0 |
'''simple docstring'''
import unittest
from transformers import BigBirdTokenizer, BigBirdTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, require_torch, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
SCREAMING_SNAKE_CASE = "▁"
SCREAMING_SNAKE_CASE = get_tests_dir("fixtures/test_sentencepiece.model")
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : str = BigBirdTokenizer
lowerCAmelCase_ : Union[str, Any] = BigBirdTokenizerFast
lowerCAmelCase_ : Optional[int] = True
lowerCAmelCase_ : Union[str, Any] = True
def A__ ( self ):
super().setUp()
UpperCAmelCase_ = self.tokenizer_class(lowerCAmelCase , keep_accents=lowerCAmelCase )
tokenizer.save_pretrained(self.tmpdirname )
def A__ ( self ):
UpperCAmelCase_ = "<s>"
UpperCAmelCase_ = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(lowerCAmelCase ) , lowerCAmelCase )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(lowerCAmelCase ) , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , "<unk>" )
self.assertEqual(vocab_keys[1] , "<s>" )
self.assertEqual(vocab_keys[-1] , "[MASK]" )
self.assertEqual(len(lowerCAmelCase ) , 1004 )
def A__ ( self ):
self.assertEqual(self.get_tokenizer().vocab_size , 1000 )
def A__ ( self ):
if not self.test_rust_tokenizer:
return
UpperCAmelCase_ = self.get_tokenizer()
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = "I was born in 92000, and this is falsé."
UpperCAmelCase_ = tokenizer.tokenize(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.tokenize(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase , add_special_tokens=lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = self.get_rust_tokenizer()
UpperCAmelCase_ = tokenizer.encode(lowerCAmelCase )
UpperCAmelCase_ = rust_tokenizer.encode(lowerCAmelCase )
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = BigBirdTokenizer(lowerCAmelCase , keep_accents=lowerCAmelCase )
UpperCAmelCase_ = tokenizer.tokenize("This is a test" )
self.assertListEqual(lowerCAmelCase , ["▁This", "▁is", "▁a", "▁t", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(lowerCAmelCase ) , [285, 46, 10, 170, 382] , )
UpperCAmelCase_ = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"é",
".",
] , )
UpperCAmelCase_ = tokenizer.convert_tokens_to_ids(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [8, 21, 84, 55, 24, 19, 7, 0, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, 0, 4] , )
UpperCAmelCase_ = tokenizer.convert_ids_to_tokens(lowerCAmelCase )
self.assertListEqual(
lowerCAmelCase , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
@cached_property
def A__ ( self ):
return BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
@slow
def A__ ( self ):
UpperCAmelCase_ = "Hello World!"
UpperCAmelCase_ = [65, 1_8536, 2260, 101, 66]
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@slow
def A__ ( self ):
UpperCAmelCase_ = (
"This is a very long text with a lot of weird characters, such as: . , ~ ? ( ) \" [ ] ! : - . Also we will"
" add words that should not exsist and be tokenized to <unk>, such as saoneuhaoesuth"
)
# fmt: off
UpperCAmelCase_ = [65, 871, 419, 358, 946, 991, 2521, 452, 358, 1357, 387, 7751, 3536, 112, 985, 456, 126, 865, 938, 5400, 5734, 458, 1368, 467, 786, 2462, 5246, 1159, 633, 865, 4519, 457, 582, 852, 2557, 427, 916, 508, 405, 3_4324, 497, 391, 408, 1_1342, 1244, 385, 100, 938, 985, 456, 574, 362, 1_2597, 3200, 3129, 1172, 66] # noqa: E231
# fmt: on
self.assertListEqual(lowerCAmelCase , self.big_tokenizer.encode(lowerCAmelCase ) )
@require_torch
@slow
def A__ ( self ):
import torch
from transformers import BigBirdConfig, BigBirdModel
# Build sequence
UpperCAmelCase_ = list(self.big_tokenizer.get_vocab().keys() )[:10]
UpperCAmelCase_ = " ".join(lowerCAmelCase )
UpperCAmelCase_ = self.big_tokenizer.encode_plus(lowerCAmelCase , return_tensors="pt" , return_token_type_ids=lowerCAmelCase )
UpperCAmelCase_ = self.big_tokenizer.batch_encode_plus(
[sequence + " " + sequence] , return_tensors="pt" , return_token_type_ids=lowerCAmelCase )
UpperCAmelCase_ = BigBirdConfig(attention_type="original_full" )
UpperCAmelCase_ = BigBirdModel(lowerCAmelCase )
assert model.get_input_embeddings().weight.shape[0] >= self.big_tokenizer.vocab_size
with torch.no_grad():
model(**lowerCAmelCase )
model(**lowerCAmelCase )
@slow
def A__ ( self ):
UpperCAmelCase_ = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base" )
UpperCAmelCase_ = tokenizer.decode(tokenizer("Paris is the [MASK]." ).input_ids )
self.assertTrue(decoded_text == "[CLS] Paris is the[MASK].[SEP]" )
@slow
def A__ ( self ):
# fmt: off
UpperCAmelCase_ = {"input_ids": [[65, 3_9286, 458, 3_6335, 2001, 456, 1_3073, 1_3266, 455, 113, 7746, 1741, 1_1157, 391, 1_3073, 1_3266, 455, 113, 3967, 3_5412, 113, 4936, 109, 3870, 2377, 113, 3_0084, 4_5720, 458, 134, 1_7496, 112, 503, 1_1672, 113, 118, 112, 5665, 1_3347, 3_8687, 112, 1496, 3_1389, 112, 3268, 4_7264, 134, 962, 112, 1_6377, 8035, 2_3130, 430, 1_2169, 1_5518, 2_8592, 458, 146, 4_1697, 109, 391, 1_2169, 1_5518, 1_6689, 458, 146, 4_1358, 109, 452, 726, 4034, 111, 763, 3_5412, 5082, 388, 1903, 111, 9051, 391, 2870, 4_8918, 1900, 1123, 550, 998, 112, 9586, 1_5985, 455, 391, 410, 2_2955, 3_7636, 114, 66], [65, 448, 1_7496, 419, 3663, 385, 763, 113, 2_7533, 2870, 3283, 1_3043, 1639, 2_4713, 523, 656, 2_4013, 1_8550, 2521, 517, 2_7014, 2_1244, 420, 1212, 1465, 391, 927, 4833, 388, 578, 1_1786, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [65, 484, 2169, 7687, 2_1932, 1_8146, 726, 363, 1_7032, 3391, 114, 66, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=lowerCAmelCase , model_name="google/bigbird-roberta-base" , revision="215c99f1600e06f83acce68422f2035b2b5c3510" , )
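# Hedged usage sketch (added): the decode round-trip exercised by the tests above, run
# directly. Assumes network access to download "google/bigbird-roberta-base".
from transformers import BigBirdTokenizer

tok = BigBirdTokenizer.from_pretrained("google/bigbird-roberta-base")
ids = tok("Paris is the [MASK].").input_ids
print(tok.decode(ids))  # "[CLS] Paris is the[MASK].[SEP]"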
| 719 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
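# Hedged usage sketch (added): feeding one dummy two-frame video through a released
# processor built on this preprocessing pipeline. VideoMAEImageProcessor is an assumption;
# any processor with the defaults above (shortest edge 224, center crop 224) behaves alike.
import numpy as np
from transformers import VideoMAEImageProcessor

processor = VideoMAEImageProcessor()
video = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(2)]
out = processor(video, return_tensors="np")
print(out["pixel_values"].shape)  # (1, 2, 3, 224, 224): batch, frames, channels, H, W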
| 23 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def snake_case__ ( __SCREAMING_SNAKE_CASE = 3 ) -> qiskit.result.counts.Counts:
    if isinstance(__SCREAMING_SNAKE_CASE , str ):
        raise TypeError("number of qubits must be an integer." )
if number_of_qubits <= 0:
raise ValueError("number of qubits must be > 0." )
if math.floor(__SCREAMING_SNAKE_CASE ) != number_of_qubits:
raise ValueError("number of qubits must be exact integer." )
if number_of_qubits > 10:
raise ValueError("number of qubits too large to simulate(>10)." )
UpperCAmelCase_ = QuantumRegister(__SCREAMING_SNAKE_CASE , "qr" )
UpperCAmelCase_ = ClassicalRegister(__SCREAMING_SNAKE_CASE , "cr" )
UpperCAmelCase_ = QuantumCircuit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = number_of_qubits
for i in range(__SCREAMING_SNAKE_CASE ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(__SCREAMING_SNAKE_CASE ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(__SCREAMING_SNAKE_CASE , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# simulate with 10000 shots
UpperCAmelCase_ = Aer.get_backend("qasm_simulator" )
UpperCAmelCase_ = execute(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , shots=1_0000 )
return job.result().get_counts(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(
f'''Total count for quantum fourier transform state is: \
{quantum_fourier_transform(3)}'''
)
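# Worked note (added): applied to the all-zeros initial state, the QFT produces an equal
# superposition, so for 3 qubits the 10000 shots should split roughly uniformly across the
# 8 basis states (about 1250 counts each, up to sampling noise).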
| 720 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 0
while number > 0:
UpperCAmelCase_ = number % 10
sum_of_digits += last_digit
UpperCAmelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case__ ( __SCREAMING_SNAKE_CASE = 100 ) -> int:
UpperCAmelCase_ = factorial(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
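# Hedged worked check (added): the digit sum of 100! is 648 (Project Euler 20), verified
# here with the standard library.
from math import factorial

assert sum(int(d) for d in str(factorial(100))) == 648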
| 23 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import DetrConfig, DetrForObjectDetection, DetrForSegmentation, DetrImageProcessor, ResNetConfig
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Any:
# initialize config
if "resnet-50" in model_name:
UpperCAmelCase_ = ResNetConfig.from_pretrained("microsoft/resnet-50" )
elif "resnet-101" in model_name:
UpperCAmelCase_ = ResNetConfig.from_pretrained("microsoft/resnet-101" )
else:
raise ValueError("Model name should include either resnet50 or resnet101" )
UpperCAmelCase_ = DetrConfig(use_timm_backbone=__SCREAMING_SNAKE_CASE , backbone_config=__SCREAMING_SNAKE_CASE )
# set label attributes
UpperCAmelCase_ = "panoptic" in model_name
if is_panoptic:
UpperCAmelCase_ = 250
else:
UpperCAmelCase_ = 91
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = "coco-detection-id2label.json"
UpperCAmelCase_ = json.load(open(hf_hub_download(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
UpperCAmelCase_ = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
return config, is_panoptic
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Tuple:
# here we list all keys to be renamed (original name on the left, our name on the right)
UpperCAmelCase_ = []
# stem
# fmt: off
rename_keys.append(("backbone.0.body.conv1.weight", "backbone.conv_encoder.model.embedder.embedder.convolution.weight") )
rename_keys.append(("backbone.0.body.bn1.weight", "backbone.conv_encoder.model.embedder.embedder.normalization.weight") )
rename_keys.append(("backbone.0.body.bn1.bias", "backbone.conv_encoder.model.embedder.embedder.normalization.bias") )
rename_keys.append(("backbone.0.body.bn1.running_mean", "backbone.conv_encoder.model.embedder.embedder.normalization.running_mean") )
rename_keys.append(("backbone.0.body.bn1.running_var", "backbone.conv_encoder.model.embedder.embedder.normalization.running_var") )
# stages
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
# shortcut
if layer_idx == 0:
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.0.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.downsample.1.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.shortcut.normalization.running_var''',
) )
# 3 convs
for i in range(3 ):
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.conv{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.convolution.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.weight''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.weight''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.bias''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.bias''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_mean''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_mean''',
) )
rename_keys.append(
(
f'''backbone.0.body.layer{stage_idx + 1}.{layer_idx}.bn{i+1}.running_var''',
f'''backbone.conv_encoder.model.encoder.stages.{stage_idx}.layers.{layer_idx}.layer.{i}.normalization.running_var''',
) )
# fmt: on
for i in range(config.encoder_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(
f'''transformer.encoder.layers.{i}.self_attn.out_proj.weight''',
f'''encoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.self_attn.out_proj.bias''', f'''encoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.weight''', f'''encoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear1.bias''', f'''encoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.weight''', f'''encoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.linear2.bias''', f'''encoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.weight''', f'''encoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm1.bias''', f'''encoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.encoder.layers.{i}.norm2.weight''', f'''encoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.encoder.layers.{i}.norm2.bias''', f'''encoder.layers.{i}.final_layer_norm.bias''') )
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.self_attn.out_proj.weight''',
f'''decoder.layers.{i}.self_attn.out_proj.weight''',
) )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.self_attn.out_proj.bias''', f'''decoder.layers.{i}.self_attn.out_proj.bias''') )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.weight''',
f'''decoder.layers.{i}.encoder_attn.out_proj.weight''',
) )
rename_keys.append(
(
f'''transformer.decoder.layers.{i}.multihead_attn.out_proj.bias''',
f'''decoder.layers.{i}.encoder_attn.out_proj.bias''',
) )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.weight''', f'''decoder.layers.{i}.fc1.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear1.bias''', f'''decoder.layers.{i}.fc1.bias''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.weight''', f'''decoder.layers.{i}.fc2.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.linear2.bias''', f'''decoder.layers.{i}.fc2.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.weight''', f'''decoder.layers.{i}.self_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm1.bias''', f'''decoder.layers.{i}.self_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.weight''', f'''decoder.layers.{i}.encoder_attn_layer_norm.weight''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm2.bias''', f'''decoder.layers.{i}.encoder_attn_layer_norm.bias''') )
rename_keys.append(
(f'''transformer.decoder.layers.{i}.norm3.weight''', f'''decoder.layers.{i}.final_layer_norm.weight''') )
rename_keys.append((f'''transformer.decoder.layers.{i}.norm3.bias''', f'''decoder.layers.{i}.final_layer_norm.bias''') )
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
rename_keys.extend(
[
("input_proj.weight", "input_projection.weight"),
("input_proj.bias", "input_projection.bias"),
("query_embed.weight", "query_position_embeddings.weight"),
("transformer.decoder.norm.weight", "decoder.layernorm.weight"),
("transformer.decoder.norm.bias", "decoder.layernorm.bias"),
("class_embed.weight", "class_labels_classifier.weight"),
("class_embed.bias", "class_labels_classifier.bias"),
("bbox_embed.layers.0.weight", "bbox_predictor.layers.0.weight"),
("bbox_embed.layers.0.bias", "bbox_predictor.layers.0.bias"),
("bbox_embed.layers.1.weight", "bbox_predictor.layers.1.weight"),
("bbox_embed.layers.1.bias", "bbox_predictor.layers.1.bias"),
("bbox_embed.layers.2.weight", "bbox_predictor.layers.2.weight"),
("bbox_embed.layers.2.bias", "bbox_predictor.layers.2.bias"),
] )
return rename_keys
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ = state_dict.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = val
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=False ) -> Optional[Any]:
UpperCAmelCase_ = ""
if is_panoptic:
UpperCAmelCase_ = "detr."
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
UpperCAmelCase_ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase_ = state_dict.pop(f'''{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:256, :]
UpperCAmelCase_ = in_proj_bias[:256]
UpperCAmelCase_ = in_proj_weight[256:512, :]
UpperCAmelCase_ = in_proj_bias[256:512]
UpperCAmelCase_ = in_proj_weight[-256:, :]
UpperCAmelCase_ = in_proj_bias[-256:]
# next: transformer decoder (which is a bit more complex because it also includes cross-attention)
for i in range(6 ):
# read in weights + bias of input projection layer of self-attention
UpperCAmelCase_ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_weight''' )
UpperCAmelCase_ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.self_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) to the state dict
UpperCAmelCase_ = in_proj_weight[:256, :]
UpperCAmelCase_ = in_proj_bias[:256]
UpperCAmelCase_ = in_proj_weight[256:512, :]
UpperCAmelCase_ = in_proj_bias[256:512]
UpperCAmelCase_ = in_proj_weight[-256:, :]
UpperCAmelCase_ = in_proj_bias[-256:]
# read in weights + bias of input projection layer of cross-attention
UpperCAmelCase_ = state_dict.pop(
f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_weight''' )
UpperCAmelCase_ = state_dict.pop(f'''{prefix}transformer.decoder.layers.{i}.multihead_attn.in_proj_bias''' )
# next, add query, keys and values (in that order) of cross-attention to the state dict
UpperCAmelCase_ = in_proj_weight_cross_attn[:256, :]
UpperCAmelCase_ = in_proj_bias_cross_attn[:256]
UpperCAmelCase_ = in_proj_weight_cross_attn[256:512, :]
UpperCAmelCase_ = in_proj_bias_cross_attn[256:512]
UpperCAmelCase_ = in_proj_weight_cross_attn[-256:, :]
UpperCAmelCase_ = in_proj_bias_cross_attn[-256:]
def snake_case__ ( ) -> str:
UpperCAmelCase_ = "http://images.cocodataset.org/val2017/000000039769.jpg"
UpperCAmelCase_ = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw )
return im
@torch.no_grad()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None , __SCREAMING_SNAKE_CASE=False ) -> str:
UpperCAmelCase_ , UpperCAmelCase_ = get_detr_config(__SCREAMING_SNAKE_CASE )
# load original model from torch hub
UpperCAmelCase_ = {
"detr-resnet-50": "detr_resnet50",
"detr-resnet-101": "detr_resnet101",
}
logger.info(f'''Converting model {model_name}...''' )
UpperCAmelCase_ = torch.hub.load("facebookresearch/detr" , model_name_to_original_name[model_name] , pretrained=__SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase_ = detr.state_dict()
# rename keys
for src, dest in create_rename_keys(__SCREAMING_SNAKE_CASE ):
if is_panoptic:
UpperCAmelCase_ = "detr." + src
rename_key(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# query, key and value matrices need special treatment
read_in_q_k_v(__SCREAMING_SNAKE_CASE , is_panoptic=__SCREAMING_SNAKE_CASE )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
UpperCAmelCase_ = "detr.model." if is_panoptic else "model."
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("detr" )
and not key.startswith("class_labels_classifier" )
and not key.startswith("bbox_predictor" )
):
UpperCAmelCase_ = state_dict.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
UpperCAmelCase_ = state_dict.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = val
elif key.startswith("bbox_attention" ) or key.startswith("mask_head" ):
continue
else:
UpperCAmelCase_ = state_dict.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = val
else:
if not key.startswith("class_labels_classifier" ) and not key.startswith("bbox_predictor" ):
UpperCAmelCase_ = state_dict.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = val
# finally, create HuggingFace model and load state dict
UpperCAmelCase_ = DetrForSegmentation(__SCREAMING_SNAKE_CASE ) if is_panoptic else DetrForObjectDetection(__SCREAMING_SNAKE_CASE )
model.load_state_dict(__SCREAMING_SNAKE_CASE )
model.eval()
# verify our conversion on an image
UpperCAmelCase_ = "coco_panoptic" if is_panoptic else "coco_detection"
UpperCAmelCase_ = DetrImageProcessor(format=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = processor(images=prepare_img() , return_tensors="pt" )
UpperCAmelCase_ = encoding["pixel_values"]
UpperCAmelCase_ = detr(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = model(__SCREAMING_SNAKE_CASE )
assert torch.allclose(outputs.logits , original_outputs["pred_logits"] , atol=1E-3 )
assert torch.allclose(outputs.pred_boxes , original_outputs["pred_boxes"] , atol=1E-3 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["pred_masks"] , atol=1E-4 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
# Save model and image processor
logger.info(f'''Saving PyTorch model and image processor to {pytorch_dump_folder_path}...''' )
Path(__SCREAMING_SNAKE_CASE ).mkdir(exist_ok=__SCREAMING_SNAKE_CASE )
model.save_pretrained(__SCREAMING_SNAKE_CASE )
processor.save_pretrained(__SCREAMING_SNAKE_CASE )
if push_to_hub:
# Upload model and image processor to the hub
logger.info("Uploading PyTorch model and image processor to the hub..." )
model.push_to_hub(f'''nielsr/{model_name}''' )
processor.push_to_hub(f'''nielsr/{model_name}''' )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--model_name",
default="detr-resnet-50",
type=str,
choices=["detr-resnet-50", "detr-resnet-101"],
help="Name of the DETR model you'd like to convert.",
)
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to the folder to output PyTorch model."
)
parser.add_argument("--push_to_hub", action="store_true", help="Whether to push the model to the hub or not.")
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
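# Hedged usage example (added): a typical invocation; the output folder is a placeholder
# and the script name is assumed from the upstream transformers repository.
#   python convert_detr_to_pytorch.py --model_name detr-resnet-50 \
#       --pytorch_dump_folder_path ./detr-resnet-50-converted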
| 721 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , ) -> int:
if config_name_or_path is None:
UpperCAmelCase_ = "facebook/rag-token-base" if model_type == "rag_token" else "facebook/rag-sequence-base"
if generator_tokenizer_name_or_path is None:
UpperCAmelCase_ = generator_name_or_path
if question_encoder_tokenizer_name_or_path is None:
UpperCAmelCase_ = question_encoder_name_or_path
UpperCAmelCase_ = RagTokenForGeneration if model_type == "rag_token" else RagSequenceForGeneration
# Save model.
UpperCAmelCase_ = RagConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = AutoConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = gen_config
UpperCAmelCase_ = question_encoder_config
UpperCAmelCase_ = model_class.from_pretrained_question_encoder_generator(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , config=__SCREAMING_SNAKE_CASE )
rag_model.save_pretrained(__SCREAMING_SNAKE_CASE )
# Sanity check.
model_class.from_pretrained(__SCREAMING_SNAKE_CASE )
# Save tokenizers.
UpperCAmelCase_ = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
gen_tokenizer.save_pretrained(dest_dir / "generator_tokenizer/" )
UpperCAmelCase_ = AutoTokenizer.from_pretrained(__SCREAMING_SNAKE_CASE )
question_encoder_tokenizer.save_pretrained(dest_dir / "question_encoder_tokenizer/" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument(
"--model_type",
choices=["rag_sequence", "rag_token"],
required=True,
type=str,
help="RAG model type: rag_sequence, rag_token",
)
parser.add_argument("--dest", type=str, required=True, help="Path to the output checkpoint directory.")
parser.add_argument("--generator_name_or_path", type=str, required=True, help="Generator model identifier")
parser.add_argument(
"--question_encoder_name_or_path", type=str, required=True, help="Question encoder model identifier"
)
parser.add_argument(
"--generator_tokenizer_name_or_path",
type=str,
help="Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``",
)
parser.add_argument(
"--question_encoder_tokenizer_name_or_path",
type=str,
help="Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``",
)
parser.add_argument(
"--config_name_or_path",
type=str,
help=(
"Identifier of the model config to use, if not provided, resolves to a base config for a given"
" ``model_type``"
),
)
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
)
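# Hedged usage example (added): consolidating a RAG-sequence checkpoint from a DPR question
# encoder and a BART generator; the model identifiers follow the upstream examples, and the
# dest path is a placeholder.
#   python consolidate_rag_checkpoint.py \
#       --model_type rag_sequence \
#       --generator_name_or_path facebook/bart-large-cnn \
#       --question_encoder_name_or_path facebook/dpr-question_encoder-single-nq-base \
#       --dest ./rag-consolidated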
| 700 |
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 23 | 0 |
'''simple docstring'''
import requests
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> None:
UpperCAmelCase_ = {"Content-Type": "application/json"}
UpperCAmelCase_ = requests.post(__SCREAMING_SNAKE_CASE , json={"text": message_body} , headers=__SCREAMING_SNAKE_CASE )
if response.status_code != 200:
UpperCAmelCase_ = (
"Request to slack returned an error "
f'''{response.status_code}, the response is:\n{response.text}'''
)
raise ValueError(__SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message("<YOUR MESSAGE BODY>", "<SLACK CHANNEL URL>")
| 701 |
import heapq as hq
import math
from collections.abc import Iterator
class Vertex:
    """A graph vertex with the bookkeeping fields used by Prim's algorithm."""

    def __init__(self, id_):
        self.id = str(id_)
        self.key = None  # tentative distance, used as the ordering/heap key
        self.pi = None  # predecessor in the minimum spanning tree
        self.neighbors = []
        self.edges = {}  # {vertex id: edge weight}

    def __lt__(self, other):
        return self.key < other.key

    def __repr__(self):
        return self.id

    def add_neighbor(self, vertex):
        self.neighbors.append(vertex)

    def add_edge(self, vertex, weight):
        self.edges[vertex.id] = weight


def connect(graph, a, b, edge):
    # add the neighbors:
    graph[a - 1].add_neighbor(graph[b - 1])
    graph[b - 1].add_neighbor(graph[a - 1])
    # add the edges:
    graph[a - 1].add_edge(graph[b - 1], edge)
    graph[b - 1].add_edge(graph[a - 1], edge)


def prim(graph: list, root: Vertex) -> list:
    """Prim's algorithm with a linear scan for the minimum, O(V^2)."""
    a = []
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0
    q = graph[:]
    while q:
        u = min(q)
        q.remove(u)
        for v in u.neighbors:
            if (v in q) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
    for i in range(1, len(graph)):
        a.append((int(graph[i].id) + 1, int(graph[i].pi.id) + 1))
    return a


def prim_heap(graph: list, root: Vertex) -> Iterator[tuple]:
    """Prim's algorithm with a binary heap, O(E log V)."""
    for u in graph:
        u.key = math.inf
        u.pi = None
    root.key = 0

    h = list(graph)
    hq.heapify(h)

    while h:
        u = hq.heappop(h)
        for v in u.neighbors:
            if (v in h) and (u.edges[v.id] < v.key):
                v.pi = u
                v.key = u.edges[v.id]
                hq.heapify(h)

    for i in range(1, len(graph)):
        yield (int(graph[i].id) + 1, int(graph[i].pi.id) + 1)


def test_vector() -> None:
    pass


if __name__ == "__main__":
    import doctest

    doctest.testmod()
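
# Usage sketch (illustrative, not part of the original module): a triangle
# graph 1-2 (w=1), 2-3 (w=2), 1-3 (w=4). Both implementations return the MST
# edges [(2, 1), (3, 2)].
if __name__ == "__main__":
    vertices = [Vertex(i) for i in range(3)]
    connect(vertices, 1, 2, 1)
    connect(vertices, 2, 3, 2)
    connect(vertices, 1, 3, 4)
    print(prim(vertices, vertices[0]))
    print(list(prim_heap(vertices, vertices[0])))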
| 23 | 0 |
import warnings

from ...utils import logging
from .image_processing_layoutlmv2 import LayoutLMv2ImageProcessor


logger = logging.get_logger(__name__)


class LayoutLMv2FeatureExtractor(LayoutLMv2ImageProcessor):
    def __init__(self, *args, **kwargs):
        warnings.warn(
            "The class LayoutLMv2FeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use LayoutLMv2ImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
| 702 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_fnet"] = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
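
# Note (illustrative, not part of the original file): `_LazyModule` replaces the
# package module in `sys.modules`, so the submodules listed in
# `_import_structure` are only imported on first attribute access, e.g.
#
#     from transformers.models.fnet import FNetModel  # triggers the torch import
#
# while importing the package by itself stays cheap.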
| 23 | 0 |
import inspect
import os
import unittest
import torch
import accelerate
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_multi_gpu
from accelerate.utils import patch_environment
class MultiGPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.data_loop_file_path = os.path.sep.join(
            mod_file.split(os.path.sep)[:-1] + ["scripts", "test_distributed_data_loop.py"]
        )
        self.operation_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_ops.py"])
    @require_multi_gpu
    def test_multi_gpu(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.test_file_path]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_multi_gpu_ops(self):
        print(f"Found {torch.cuda.device_count()} devices.")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.operation_file_path]
        print(f"Command: {cmd}")
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_pad_across_processes(self):
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", inspect.getfile(self.__class__)]
        with patch_environment(omp_num_threads=1):
            execute_subprocess_async(cmd, env=os.environ.copy())

    @require_multi_gpu
    def test_distributed_data_loop(self):
        print(f"Found {torch.cuda.device_count()} devices, using 2 devices only")
        cmd = ["torchrun", f"--nproc_per_node={torch.cuda.device_count()}", self.data_loop_file_path]
        with patch_environment(omp_num_threads=1, cuda_visible_devices="0,1"):
            execute_subprocess_async(cmd, env=os.environ.copy())
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = Accelerator()
SCREAMING_SNAKE_CASE = (accelerator.state.process_index + 2, 10)
SCREAMING_SNAKE_CASE = torch.randint(0, 10, shape).to(accelerator.device)
SCREAMING_SNAKE_CASE = ""
SCREAMING_SNAKE_CASE = accelerator.pad_across_processes(tensor)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
if not torch.equal(tensora[: accelerator.state.process_index + 2], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[accelerator.state.process_index + 2 :] == 0):
error_msg += "Padding was not done with the right value (0)."
SCREAMING_SNAKE_CASE = accelerator.pad_across_processes(tensor, pad_first=True)
if tensora.shape[0] != accelerator.state.num_processes + 1:
error_msg += f"Found shape {tensora.shape} but should have {accelerator.state.num_processes + 1} at dim 0."
SCREAMING_SNAKE_CASE = accelerator.state.num_processes - accelerator.state.process_index - 1
if not torch.equal(tensora[index:], tensor):
error_msg += "Tensors have different values."
if not torch.all(tensora[:index] == 0):
error_msg += "Padding was not done with the right value (0)."
# Raise error at the end to make sure we don't stop at the first failure.
if len(error_msg) > 0:
raise ValueError(error_msg)
| 703 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"yjernite/retribert-base-uncased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class RetriBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    slow_tokenizer_class = RetriBertTokenizer
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 23 | 0 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class MultiTPUTester(unittest.TestCase):
    def setUp(self):
        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_file_path = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "test_script.py"])
        self.test_dir = os.path.sep.join(inspect.getfile(self.__class__).split(os.path.sep)[:-1])

    @require_tpu
    def test_tpu(self):
        distributed_args = f"""
        {self.test_dir}/xla_spawn.py
        --num_cores 8
        {self.test_file_path}
        """.split()
        cmd = [sys.executable] + distributed_args
        execute_subprocess_async(cmd, env=os.environ.copy())
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}

PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    pretrained_init_configuration = PRETRAINED_INIT_CONFIGURATION
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = DistilBertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        unk_token="[UNK]",
        sep_token="[SEP]",
        pad_token="[PAD]",
        cls_token="[CLS]",
        mask_token="[MASK]",
        tokenize_chinese_chars=True,
        strip_accents=None,
        **kwargs,
    ):
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            tokenize_chinese_chars=tokenize_chinese_chars,
            strip_accents=strip_accents,
            **kwargs,
        )

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__())
        if (
            normalizer_state.get("lowercase", do_lower_case) != do_lower_case
            or normalizer_state.get("strip_accents", strip_accents) != strip_accents
            or normalizer_state.get("handle_chinese_chars", tokenize_chinese_chars) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers, normalizer_state.pop("type"))
            normalizer_state["lowercase"] = do_lower_case
            normalizer_state["strip_accents"] = strip_accents
            normalizer_state["handle_chinese_chars"] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state)

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None):
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences(self, token_ids_0, token_ids_1=None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory, filename_prefix=None):
        files = self._tokenizer.model.save(save_directory, name=filename_prefix)
        return tuple(files)
| 23 | 0 |
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModel, PreTrainedModel
from ...utils import logging
logger = logging.get_logger(__name__)


def cosine_distance(image_embeds, text_embeds):
    normalized_image_embeds = nn.functional.normalize(image_embeds)
    normalized_text_embeds = nn.functional.normalize(text_embeds)
    return torch.mm(normalized_image_embeds, normalized_text_embeds.t())

class StableDiffusionSafetyChecker(PreTrainedModel):
    config_class = CLIPConfig
    _no_split_modules = ["CLIPEncoderLayer"]

    def __init__(self, config: CLIPConfig):
        super().__init__(config)

        self.vision_model = CLIPVisionModel(config.vision_config)
        self.visual_projection = nn.Linear(config.vision_config.hidden_size, config.projection_dim, bias=False)

        self.concept_embeds = nn.Parameter(torch.ones(17, config.projection_dim), requires_grad=False)
        self.special_care_embeds = nn.Parameter(torch.ones(3, config.projection_dim), requires_grad=False)

        self.concept_embeds_weights = nn.Parameter(torch.ones(17), requires_grad=False)
        self.special_care_embeds_weights = nn.Parameter(torch.ones(3), requires_grad=False)
    @torch.no_grad()
    def forward(self, clip_input, images):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds).cpu().float().numpy()
        cos_dist = cosine_distance(image_embeds, self.concept_embeds).cpu().float().numpy()

        result = []
        batch_size = image_embeds.shape[0]
        for i in range(batch_size):
            result_img = {"special_scores": {}, "special_care": [], "concept_scores": {}, "bad_concepts": []}

            # increase this value to create a stronger `nsfw` filter
            # at the cost of increasing the possibility of filtering benign images
            adjustment = 0.0

            for concept_idx in range(len(special_cos_dist[0])):
                concept_cos = special_cos_dist[i][concept_idx]
                concept_threshold = self.special_care_embeds_weights[concept_idx].item()
                result_img["special_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["special_scores"][concept_idx] > 0:
                    result_img["special_care"].append({concept_idx, result_img["special_scores"][concept_idx]})
                    adjustment = 0.01

            for concept_idx in range(len(cos_dist[0])):
                concept_cos = cos_dist[i][concept_idx]
                concept_threshold = self.concept_embeds_weights[concept_idx].item()
                result_img["concept_scores"][concept_idx] = round(concept_cos - concept_threshold + adjustment, 3)
                if result_img["concept_scores"][concept_idx] > 0:
                    result_img["bad_concepts"].append(concept_idx)

            result.append(result_img)

        has_nsfw_concepts = [len(res["bad_concepts"]) > 0 for res in result]

        return images, has_nsfw_concepts
    @torch.no_grad()
    def forward_onnx(self, clip_input: torch.FloatTensor, images: torch.FloatTensor):
        pooled_output = self.vision_model(clip_input)[1]  # pooled_output
        image_embeds = self.visual_projection(pooled_output)

        special_cos_dist = cosine_distance(image_embeds, self.special_care_embeds)
        cos_dist = cosine_distance(image_embeds, self.concept_embeds)

        # increase this value to create a stronger `nsfw` filter
        # at the cost of increasing the possibility of filtering benign images
        adjustment = 0.0

        special_scores = special_cos_dist - self.special_care_embeds_weights + adjustment
        # special_scores = special_scores.round(decimals=3)
        special_care = torch.any(special_scores > 0, dim=1)
        special_adjustment = special_care * 0.01
        special_adjustment = special_adjustment.unsqueeze(1).expand(-1, cos_dist.shape[1])

        concept_scores = (cos_dist - self.concept_embeds_weights) + special_adjustment
        # concept_scores = concept_scores.round(decimals=3)
        has_nsfw_concepts = torch.any(concept_scores > 0, dim=1)

        return images, has_nsfw_concepts
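
# Usage sketch for cosine_distance (illustrative, not part of the original
# file): rows are L2-normalised, so the result is a [batch, num_concepts]
# matrix of cosine similarities bounded by 1 in absolute value.
if __name__ == "__main__":
    img = torch.randn(2, 8)
    txt = torch.randn(5, 8)
    sim = cosine_distance(img, txt)
    assert sim.shape == (2, 5)
    assert torch.all(sim.abs() <= 1 + 1e-6)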
| 705 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
def ffmpeg_read(bpayload: bytes, sampling_rate: int) -> np.array:
    """
    Helper function to read an audio file through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    format_for_conversion = "f32le"
    ffmpeg_command = [
        "ffmpeg",
        "-i",
        "pipe:0",
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]

    try:
        with subprocess.Popen(ffmpeg_command, stdin=subprocess.PIPE, stdout=subprocess.PIPE) as ffmpeg_process:
            output_stream = ffmpeg_process.communicate(bpayload)
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to load audio files from filename") from error
    out_bytes = output_stream[0]
    audio = np.frombuffer(out_bytes, np.float32)
    if audio.shape[0] == 0:
        raise ValueError("Malformed soundfile")
    return audio

def ffmpeg_microphone(
    sampling_rate: int,
    chunk_length_s: float,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read raw microphone data through ffmpeg.
    """
    ar = f"{sampling_rate}"
    ac = "1"
    if format_for_conversion == "s16le":
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    system = platform.system()
    if system == "Linux":
        format_ = "alsa"
        input_ = "default"
    elif system == "Darwin":
        format_ = "avfoundation"
        input_ = ":0"
    elif system == "Windows":
        format_ = "dshow"
        input_ = "default"

    ffmpeg_command = [
        "ffmpeg",
        "-f",
        format_,
        "-i",
        input_,
        "-ac",
        ac,
        "-ar",
        ar,
        "-f",
        format_for_conversion,
        "-fflags",
        "nobuffer",
        "-hide_banner",
        "-loglevel",
        "quiet",
        "pipe:1",
    ]
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    iterator = _ffmpeg_stream(ffmpeg_command, chunk_len)
    for item in iterator:
        yield item

def ffmpeg_microphone_live(
    sampling_rate: int,
    chunk_length_s: float,
    stream_chunk_s: Optional[int] = None,
    stride_length_s: Optional[Union[Tuple[float, float], float]] = None,
    format_for_conversion: str = "f32le",
):
    """
    Helper function to read audio from the microphone in chunks, with striding
    so downstream chunked inference can overlap windows.
    """
    if stream_chunk_s is not None:
        chunk_s = stream_chunk_s
    else:
        chunk_s = chunk_length_s

    microphone = ffmpeg_microphone(sampling_rate, chunk_s, format_for_conversion=format_for_conversion)
    if format_for_conversion == "s16le":
        dtype = np.int16
        size_of_sample = 2
    elif format_for_conversion == "f32le":
        dtype = np.float32
        size_of_sample = 4
    else:
        raise ValueError(f"Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`")

    if stride_length_s is None:
        stride_length_s = chunk_length_s / 6
    chunk_len = int(round(sampling_rate * chunk_length_s)) * size_of_sample
    if isinstance(stride_length_s, (int, float)):
        stride_length_s = [stride_length_s, stride_length_s]

    stride_left = int(round(sampling_rate * stride_length_s[0])) * size_of_sample
    stride_right = int(round(sampling_rate * stride_length_s[1])) * size_of_sample
    audio_time = datetime.datetime.now()
    delta = datetime.timedelta(seconds=chunk_s)
    for item in chunk_bytes_iter(microphone, chunk_len, stride=(stride_left, stride_right), stream=True):
        # Put everything back in numpy scale
        item["raw"] = np.frombuffer(item["raw"], dtype=dtype)
        item["stride"] = (
            item["stride"][0] // size_of_sample,
            item["stride"][1] // size_of_sample,
        )
        item["sampling_rate"] = sampling_rate
        audio_time += delta
        if datetime.datetime.now() > audio_time + 10 * delta:
            # We're late !! SKIP
            continue
        yield item

def chunk_bytes_iter(iterator, chunk_len: int, stride: Tuple[int, int], stream: bool = False):
    """
    Reads raw bytes from an iterator and cuts them into chunks of length `chunk_len`, overlapping by `stride`.
    With `stream=True`, partial results are yielded before a full `chunk_len` has accumulated.
    """
    acc = b""
    stride_left, stride_right = stride
    if stride_left + stride_right >= chunk_len:
        raise ValueError(
            f"Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}"
        )
    _stride_left = 0
    for raw in iterator:
        acc += raw
        if stream and len(acc) < chunk_len:
            stride = (_stride_left, 0)
            yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
        else:
            while len(acc) >= chunk_len:
                # We are flushing the accumulator
                stride = (_stride_left, stride_right)
                item = {"raw": acc[:chunk_len], "stride": stride}
                if stream:
                    item["partial"] = False
                yield item
                _stride_left = stride_left
                acc = acc[chunk_len - stride_left - stride_right :]
    # Last chunk
    if len(acc) > stride_left:
        item = {"raw": acc, "stride": (_stride_left, 0)}
        if stream:
            item["partial"] = False
        yield item

def _ffmpeg_stream(ffmpeg_command, buflen: int):
    """
    Internal function to create the generator of data through ffmpeg.
    """
    bufsize = 2**24  # 16Mo
    try:
        with subprocess.Popen(ffmpeg_command, stdout=subprocess.PIPE, bufsize=bufsize) as ffmpeg_process:
            while True:
                raw = ffmpeg_process.stdout.read(buflen)
                if raw == b"":
                    break
                yield raw
    except FileNotFoundError as error:
        raise ValueError("ffmpeg was not found but is required to stream audio files from filename") from error
| 23 | 0 |
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
import torch
from ...utils import is_npu_available, is_xpu_available
from .config_args import ClusterConfig, default_json_config_file
from .config_utils import SubcommandHelpFormatter
SCREAMING_SNAKE_CASE = "Create a default config file for Accelerate with only a few flags set."
def snake_case__ ( __SCREAMING_SNAKE_CASE="no" , __SCREAMING_SNAKE_CASE = default_json_config_file , __SCREAMING_SNAKE_CASE = False ) -> Optional[Any]:
UpperCAmelCase_ = Path(__SCREAMING_SNAKE_CASE )
path.parent.mkdir(parents=__SCREAMING_SNAKE_CASE , exist_ok=__SCREAMING_SNAKE_CASE )
if path.exists():
print(
f'''Configuration already exists at {save_location}, will not override. Run `accelerate config` manually or pass a different `save_location`.''' )
return False
UpperCAmelCase_ = mixed_precision.lower()
if mixed_precision not in ["no", "fp16", "bf16", "fp8"]:
raise ValueError(
f'''`mixed_precision` should be one of \'no\', \'fp16\', \'bf16\', or \'fp8\'. Received {mixed_precision}''' )
UpperCAmelCase_ = {
"compute_environment": "LOCAL_MACHINE",
"mixed_precision": mixed_precision,
}
if torch.cuda.is_available():
UpperCAmelCase_ = torch.cuda.device_count()
UpperCAmelCase_ = num_gpus
UpperCAmelCase_ = False
if num_gpus > 1:
UpperCAmelCase_ = "MULTI_GPU"
else:
UpperCAmelCase_ = "NO"
elif is_xpu_available() and use_xpu:
UpperCAmelCase_ = torch.xpu.device_count()
UpperCAmelCase_ = num_xpus
UpperCAmelCase_ = False
if num_xpus > 1:
UpperCAmelCase_ = "MULTI_XPU"
else:
UpperCAmelCase_ = "NO"
elif is_npu_available():
UpperCAmelCase_ = torch.npu.device_count()
UpperCAmelCase_ = num_npus
UpperCAmelCase_ = False
if num_npus > 1:
UpperCAmelCase_ = "MULTI_NPU"
else:
UpperCAmelCase_ = "NO"
else:
UpperCAmelCase_ = 0
UpperCAmelCase_ = True
UpperCAmelCase_ = 1
UpperCAmelCase_ = "NO"
UpperCAmelCase_ = ClusterConfig(**__SCREAMING_SNAKE_CASE )
config.to_json_file(__SCREAMING_SNAKE_CASE )
return path
def default_command_parser(parser, parents):
    parser = parser.add_parser("default", parents=parents, help=description, formatter_class=SubcommandHelpFormatter)
    parser.add_argument(
        "--config_file",
        default=default_json_config_file,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
        dest="save_location",
    )
    parser.add_argument(
        "--mixed_precision",
        choices=["no", "fp16", "bf16"],
        type=str,
        help="Whether or not to use mixed precision training. "
        "Choose between FP16 and BF16 (bfloat16) training. "
        "BF16 training is only supported on Nvidia Ampere GPUs and PyTorch 1.10 or later.",
        default="no",
    )
    parser.set_defaults(func=default_config_command)
    return parser


def default_config_command(args):
    config_file = write_basic_config(args.mixed_precision, args.save_location)
    if config_file:
        print(f"accelerate configuration saved at {config_file}")
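
# Usage sketch (illustrative, not part of the original file): write a minimal
# single-process config to a temporary location instead of the default cache
# path; the path below is a placeholder.
if __name__ == "__main__":
    out = write_basic_config(mixed_precision="no", save_location="/tmp/accelerate_default_config.json")
    if out:
        print(f"wrote {out}")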
| 706 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class StableUnCLIPImageNormalizer(ModelMixin, ConfigMixin):
    """
    Holds the mean and standard deviation of the CLIP embedder used in stable unCLIP, and scales/unscales image
    embeddings accordingly.
    """

    @register_to_config
    def __init__(
        self,
        embedding_dim: int = 768,
    ):
        super().__init__()
        self.mean = nn.Parameter(torch.zeros(1, embedding_dim))
        self.std = nn.Parameter(torch.ones(1, embedding_dim))

    def to(
        self,
        torch_device: Optional[Union[str, torch.device]] = None,
        torch_dtype: Optional[torch.dtype] = None,
    ):
        self.mean = nn.Parameter(self.mean.to(torch_device).to(torch_dtype))
        self.std = nn.Parameter(self.std.to(torch_device).to(torch_dtype))
        return self

    def scale(self, embeds):
        embeds = (embeds - self.mean) * 1.0 / self.std
        return embeds

    def unscale(self, embeds):
        embeds = (embeds * self.std) + self.mean
        return embeds
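
# Usage sketch (illustrative, not part of the original file; requires
# diffusers): scale() and unscale() are inverses, so a round-trip through the
# normalizer recovers the original embeddings.
if __name__ == "__main__":
    normalizer = StableUnCLIPImageNormalizer(embedding_dim=4)
    embeds = torch.randn(2, 4)
    roundtrip = normalizer.unscale(normalizer.scale(embeds))
    assert torch.allclose(roundtrip, embeds, atol=1e-6)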
| 23 | 0 |
"""Recursive 0/1 knapsack."""


def knapsack(weights: list, values: list, number_of_items: int, max_weight: int, index: int) -> int:
    """
    Best value obtainable from item `index` onwards given `max_weight`
    remaining capacity: either skip the current item, or take it if it fits.
    """
    if index == number_of_items:
        return 0
    ans1 = 0
    ans2 = 0
    ans1 = knapsack(weights, values, number_of_items, max_weight, index + 1)
    if weights[index] <= max_weight:
        ans2 = values[index] + knapsack(
            weights, values, number_of_items, max_weight - weights[index], index + 1
        )
    return max(ans1, ans2)

if __name__ == "__main__":
import doctest
doctest.testmod()
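
# Usage sketch (illustrative, not part of the original file): three items with
# weights [1, 3, 5] and values [1, 4, 7]; with capacity 7 the best choice is
# items 0 and 2 (total weight 6, total value 8).
if __name__ == "__main__":
    assert knapsack([1, 3, 5], [1, 4, 7], 3, 7, 0) == 8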
| 707 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class ImageToTextPipeline(Pipeline):
    """
    Image-to-text pipeline: predicts a caption for a given image.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING
        )

    def _sanitize_parameters(self, max_new_tokens=None, generate_kwargs=None, prompt=None):
        forward_kwargs = {}
        preprocess_params = {}

        if prompt is not None:
            preprocess_params["prompt"] = prompt

        if generate_kwargs is not None:
            forward_kwargs["generate_kwargs"] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs["generate_kwargs"] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    "'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
                    " please use only one"
                )
            forward_kwargs["generate_kwargs"]["max_new_tokens"] = max_new_tokens
        return preprocess_params, forward_kwargs, {}

    def __call__(self, images, **kwargs):
        return super().__call__(images, **kwargs)

    def preprocess(self, image, prompt=None):
        image = load_image(image)

        if prompt is not None:
            if not isinstance(prompt, str):
                raise ValueError(
                    f"Received an invalid text input, got - {type(prompt)} - but expected a single string. "
                    "Note also that one single text can be provided for conditional image to text generation."
                )

            model_type = self.model.config.model_type

            if model_type == "git":
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                input_ids = self.tokenizer(text=prompt, add_special_tokens=False).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids).unsqueeze(0)
                model_inputs.update({"input_ids": input_ids})

            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image, header_text=prompt, return_tensors=self.framework)

            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image, return_tensors=self.framework)
                text_inputs = self.tokenizer(prompt, return_tensors=self.framework)
                model_inputs.update(text_inputs)

            else:
                raise ValueError(f"Model type {model_type} does not support conditional text generation")

        else:
            model_inputs = self.image_processor(images=image, return_tensors=self.framework)

        if self.model.config.model_type == "git" and prompt is None:
            model_inputs["input_ids"] = None

        return model_inputs

    def _forward(self, model_inputs, generate_kwargs=None):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs["input_ids"], list)
            and all(x is None for x in model_inputs["input_ids"])
        ):
            model_inputs["input_ids"] = None

        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name)
        model_outputs = self.model.generate(inputs, **model_inputs, **generate_kwargs)
        return model_outputs

    def postprocess(self, model_outputs):
        records = []
        for output_ids in model_outputs:
            record = {
                "generated_text": self.tokenizer.decode(
                    output_ids,
                    skip_special_tokens=True,
                )
            }
            records.append(record)
        return records
| 23 | 0 |
from bisect import bisect
from itertools import accumulate
def frac_knapsack(vl, wt, w, n):
    """
    Greedy fractional knapsack: sort items by value/weight ratio, take whole
    items while they fit, then take a fraction of the first one that does not.
    """
    r = sorted(zip(vl, wt), key=lambda x: x[0] / x[1], reverse=True)
    vl, wt = [i[0] for i in r], [i[1] for i in r]
    acc = list(accumulate(wt))
    k = bisect(acc, w)
    return (
        0
        if k == 0
        else sum(vl[:k]) + (w - acc[k - 1]) * (vl[k]) / (wt[k])
        if k != n
        else sum(vl[:k])
    )

if __name__ == "__main__":
import doctest
doctest.testmod()
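
# Usage sketch (illustrative, not part of the original file): the classic
# instance with values [60, 100, 120], weights [10, 20, 30] and capacity 50
# yields 240.0 (items 0 and 1 whole, plus two thirds of item 2).
if __name__ == "__main__":
    assert frac_knapsack([60, 100, 120], [10, 20, 30], 50, 3) == 240.0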
| 708 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = TextToVideoSDPipeline
lowerCAmelCase_ : Dict = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase_ : Optional[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = TextToVideoSDPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase_ = "np"
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
UpperCAmelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A__ ( self ):
pass
def A__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 23 | 0 |
from typing import List
from .keymap import KEYMAP, get_character
def mark(key: str):
    """
    Mark the function with the key code so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += [key]
        setattr(func, "handle_key", handle)
        return func

    return decorator


def mark_multiple(*keys: List[str]):
    """
    Mark the function with the key codes so it can be handled in the register.
    """

    def decorator(func):
        handle = getattr(func, "handle_key", [])
        handle += keys
        setattr(func, "handle_key", handle)
        return func

    return decorator


class KeyHandler(type):
    def __new__(cls, name, bases, attrs):
        new_cls = super().__new__(cls, name, bases, attrs)
        if not hasattr(new_cls, "key_handler"):
            setattr(new_cls, "key_handler", {})
        setattr(new_cls, "handle_input", KeyHandler.handle_input)

        for value in attrs.values():
            handled_keys = getattr(value, "handle_key", [])
            for key in handled_keys:
                new_cls.key_handler[key] = value
        return new_cls

    @staticmethod
    def handle_input(cls):
        """Finds and returns the selected character if it exists and is handled."""
        char = get_character()
        if char != KEYMAP["undefined"]:
            char = ord(char)
        handler = cls.key_handler.get(char)
        if handler:
            cls.current_selection = char
            return handler(cls)
        else:
            return None


def register(cls):
    """Adds the KeyHandler metaclass to the class."""
    return KeyHandler(cls.__name__, cls.__bases__, cls.__dict__.copy())
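
# Usage sketch (illustrative, not part of the original file): decorate methods
# with @mark and wrap the class with register(); handle_input() then reads a
# key from the terminal and dispatches to the matching handler.
#
#     @register
#     class Menu:
#         @mark("a")
#         def on_a(cls):
#             return "pressed a"
#
#     Menu.handle_input()  # returns "pressed a" when the user presses "a"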
| 709 |
def ugly_numbers(n: int) -> int:
    """
    Return the n-th ugly number, i.e. the n-th positive integer whose only
    prime factors are 2, 3 and 5. Uses three merge pointers into the sequence
    built so far.
    """
    ugly_nums = [1]

    i2, i3, i5 = 0, 0, 0
    next_2 = ugly_nums[i2] * 2
    next_3 = ugly_nums[i3] * 3
    next_5 = ugly_nums[i5] * 5

    for _ in range(1, n):
        next_num = min(next_2, next_3, next_5)
        ugly_nums.append(next_num)
        if next_num == next_2:
            i2 += 1
            next_2 = ugly_nums[i2] * 2
        if next_num == next_3:
            i3 += 1
            next_3 = ugly_nums[i3] * 3
        if next_num == next_5:
            i5 += 1
            next_5 = ugly_nums[i5] * 5
    return ugly_nums[-1]

if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
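
# Illustrative check (not part of the original file): the sequence starts
# 1, 2, 3, 4, 5, 6, 8, 9, 10, 12, so the 10th ugly number is 12.
if __name__ == "__main__":
    assert [ugly_numbers(i) for i in range(1, 11)] == [1, 2, 3, 4, 5, 6, 8, 9, 10, 12]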
| 23 | 0 |
import tensorflow as tf
from ...tf_utils import shape_list
class lowerCamelCase ( tf.keras.layers.Layer ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=1 , lowerCAmelCase=False , **lowerCAmelCase ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = d_embed
UpperCAmelCase_ = d_proj
UpperCAmelCase_ = cutoffs + [vocab_size]
UpperCAmelCase_ = [0] + self.cutoffs
UpperCAmelCase_ = div_val
UpperCAmelCase_ = self.cutoffs[0]
UpperCAmelCase_ = len(self.cutoffs ) - 1
UpperCAmelCase_ = self.shortlist_size + self.n_clusters
UpperCAmelCase_ = keep_order
UpperCAmelCase_ = []
UpperCAmelCase_ = []
def A__ ( self , lowerCAmelCase ):
if self.n_clusters > 0:
UpperCAmelCase_ = self.add_weight(
shape=(self.n_clusters, self.d_embed) , initializer="zeros" , trainable=lowerCAmelCase , name="cluster_weight" )
UpperCAmelCase_ = self.add_weight(
shape=(self.n_clusters,) , initializer="zeros" , trainable=lowerCAmelCase , name="cluster_bias" )
if self.div_val == 1:
for i in range(len(self.cutoffs ) ):
if self.d_proj != self.d_embed:
UpperCAmelCase_ = self.add_weight(
shape=(self.d_embed, self.d_proj) , initializer="zeros" , trainable=lowerCAmelCase , name=f'''out_projs_._{i}''' , )
self.out_projs.append(lowerCAmelCase )
else:
self.out_projs.append(lowerCAmelCase )
UpperCAmelCase_ = self.add_weight(
shape=(self.vocab_size, self.d_embed) , initializer="zeros" , trainable=lowerCAmelCase , name=f'''out_layers_._{i}_._weight''' , )
UpperCAmelCase_ = self.add_weight(
shape=(self.vocab_size,) , initializer="zeros" , trainable=lowerCAmelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
else:
for i in range(len(self.cutoffs ) ):
UpperCAmelCase_ , UpperCAmelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
UpperCAmelCase_ = self.d_embed // (self.div_val**i)
UpperCAmelCase_ = self.add_weight(
shape=(d_emb_i, self.d_proj) , initializer="zeros" , trainable=lowerCAmelCase , name=f'''out_projs_._{i}''' )
self.out_projs.append(lowerCAmelCase )
UpperCAmelCase_ = self.add_weight(
shape=(r_idx - l_idx, d_emb_i) , initializer="zeros" , trainable=lowerCAmelCase , name=f'''out_layers_._{i}_._weight''' , )
UpperCAmelCase_ = self.add_weight(
shape=(r_idx - l_idx,) , initializer="zeros" , trainable=lowerCAmelCase , name=f'''out_layers_._{i}_._bias''' , )
self.out_layers.append((weight, bias) )
super().build(lowerCAmelCase )
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = x
if proj is not None:
UpperCAmelCase_ = tf.einsum("ibd,ed->ibe" , lowerCAmelCase , lowerCAmelCase )
return tf.einsum("ibd,nd->ibn" , lowerCAmelCase , lowerCAmelCase ) + b
@staticmethod
def A__ ( lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = shape_list(lowerCAmelCase )
UpperCAmelCase_ = tf.range(lp_size[0] , dtype=target.dtype )
UpperCAmelCase_ = tf.stack([r, target] , 1 )
return tf.gather_nd(lowerCAmelCase , lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=True , lowerCAmelCase=False ):
UpperCAmelCase_ = 0
if self.n_clusters == 0:
UpperCAmelCase_ = self._logit(lowerCAmelCase , self.out_layers[0][0] , self.out_layers[0][1] , self.out_projs[0] )
if target is not None:
UpperCAmelCase_ = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=lowerCAmelCase , logits=lowerCAmelCase )
UpperCAmelCase_ = tf.nn.log_softmax(lowerCAmelCase , axis=-1 )
else:
UpperCAmelCase_ = shape_list(lowerCAmelCase )
UpperCAmelCase_ = []
UpperCAmelCase_ = tf.zeros(hidden_sizes[:2] )
for i in range(len(self.cutoffs ) ):
UpperCAmelCase_ , UpperCAmelCase_ = self.cutoff_ends[i], self.cutoff_ends[i + 1]
if target is not None:
UpperCAmelCase_ = (target >= l_idx) & (target < r_idx)
UpperCAmelCase_ = tf.where(lowerCAmelCase )
UpperCAmelCase_ = tf.boolean_mask(lowerCAmelCase , lowerCAmelCase ) - l_idx
if self.div_val == 1:
UpperCAmelCase_ = self.out_layers[0][0][l_idx:r_idx]
UpperCAmelCase_ = self.out_layers[0][1][l_idx:r_idx]
else:
UpperCAmelCase_ = self.out_layers[i][0]
UpperCAmelCase_ = self.out_layers[i][1]
if i == 0:
UpperCAmelCase_ = tf.concat([cur_W, self.cluster_weight] , 0 )
UpperCAmelCase_ = tf.concat([cur_b, self.cluster_bias] , 0 )
UpperCAmelCase_ = self._logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.out_projs[0] )
UpperCAmelCase_ = tf.nn.log_softmax(lowerCAmelCase )
out.append(head_logprob[..., : self.cutoffs[0]] )
if target is not None:
UpperCAmelCase_ = tf.boolean_mask(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = self._gather_logprob(lowerCAmelCase , lowerCAmelCase )
else:
UpperCAmelCase_ = self._logit(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , self.out_projs[i] )
UpperCAmelCase_ = tf.nn.log_softmax(lowerCAmelCase )
UpperCAmelCase_ = self.cutoffs[0] + i - 1 # No probability for the head cluster
UpperCAmelCase_ = head_logprob[..., cluster_prob_idx, None] + tail_logprob
out.append(lowerCAmelCase )
if target is not None:
UpperCAmelCase_ = tf.boolean_mask(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = tf.boolean_mask(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = self._gather_logprob(lowerCAmelCase , lowerCAmelCase )
cur_logprob += cur_head_logprob[:, self.cutoff_ends[1] + i - 1]
if target is not None:
loss += tf.scatter_nd(lowerCAmelCase , -cur_logprob , shape_list(lowerCAmelCase ) )
UpperCAmelCase_ = tf.concat(lowerCAmelCase , axis=-1 )
if target is not None:
if return_mean:
UpperCAmelCase_ = tf.reduce_mean(lowerCAmelCase )
# Add the training-time loss value to the layer using `self.add_loss()`.
self.add_loss(lowerCAmelCase )
# Log the loss as a metric (we could log arbitrary metrics,
# including different metrics for training and inference.
self.add_metric(lowerCAmelCase , name=self.name , aggregation="mean" if return_mean else "" )
return out
| 710 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=224 , lowerCAmelCase=1000 , lowerCAmelCase=[3, 3, 6, 4] , lowerCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = layer_depths
UpperCAmelCase_ = embed_dims
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1e-5 , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ):
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : int = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Optional[Any] = False
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
def _config_zero_init(lowerCAmelCase ):
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1e-1_0 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
UpperCAmelCase_ = _config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
        UpperCAmelCase_ = torch.tensor([[-2.1703e00, 2.1107e00, -2.0811e00]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
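# Illustrative sketch (added; helper name is hypothetical, not part of the
# upstream test): the hidden-state shape rule asserted above. SwiftFormer
# emits 8 feature maps and halves the spatial resolution after every 2
# stages, starting from image_size // 4.
def _expected_swiftformer_hw(image_size, stage_index):
    return (image_size // 4) // 2 ** (stage_index // 2)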
| 23 | 0 |
import numpy as np
import torch
from torch.utils.data import Dataset, IterableDataset
from ..utils.generic import ModelOutput
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def __init__( self , dataset , process , params ):
UpperCAmelCase_ = dataset
UpperCAmelCase_ = process
UpperCAmelCase_ = params
def __len__( self ):
return len(self.dataset )
    def __getitem__( self , i ):
        UpperCAmelCase_ = self.dataset[i]
        UpperCAmelCase_ = self.process(UpperCAmelCase_ , **self.params )
        return UpperCAmelCase_
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def __init__( self , loader , infer , params , loader_batch_size=None ):
UpperCAmelCase_ = loader
UpperCAmelCase_ = infer
UpperCAmelCase_ = params
if loader_batch_size == 1:
# Let's spare some time by deactivating altogether
UpperCAmelCase_ = None
UpperCAmelCase_ = loader_batch_size
# Internal bookkeeping
UpperCAmelCase_ = None
UpperCAmelCase_ = None
def __len__( self ):
return len(self.loader )
def __iter__( self ):
UpperCAmelCase_ = iter(self.loader )
return self
def A__ ( self ):
if isinstance(self._loader_batch_data , torch.Tensor ):
# Batch data is simple tensor, just fetch the slice
UpperCAmelCase_ = self._loader_batch_data[self._loader_batch_index]
else:
# Batch data is assumed to be BaseModelOutput (or dict)
UpperCAmelCase_ = {}
for k, element in self._loader_batch_data.items():
                if isinstance(element , ModelOutput ):
# Convert ModelOutput to tuple first
UpperCAmelCase_ = element.to_tuple()
if isinstance(element[0] , torch.Tensor ):
UpperCAmelCase_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCAmelCase_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if k in {"hidden_states", "past_key_values", "attentions"} and isinstance(lowerCAmelCase , lowerCAmelCase ):
# Those are stored as lists of tensors so need specific unbatching.
if isinstance(element[0] , torch.Tensor ):
UpperCAmelCase_ = tuple(el[self._loader_batch_index].unsqueeze(0 ) for el in element )
elif isinstance(element[0] , np.ndarray ):
UpperCAmelCase_ = tuple(np.expand_dims(el[self._loader_batch_index] , 0 ) for el in element )
continue
if element is None:
# This can happen for optional data that get passed around
UpperCAmelCase_ = None
elif isinstance(element[self._loader_batch_index] , torch.Tensor ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
UpperCAmelCase_ = element[self._loader_batch_index].unsqueeze(0 )
elif isinstance(element[self._loader_batch_index] , np.ndarray ):
                    # Take correct batch data, but make it look like batch_size=1
# For compatibility with other methods within transformers
UpperCAmelCase_ = np.expand_dims(element[self._loader_batch_index] , 0 )
else:
# This is typically a list, so no need to `unsqueeze`.
UpperCAmelCase_ = element[self._loader_batch_index]
# Recreate the element by reusing the original class to make it look
# batch_size=1
UpperCAmelCase_ = self._loader_batch_data.__class__(lowerCAmelCase )
self._loader_batch_index += 1
return result
def A__ ( self ):
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
# We are currently unrolling a batch so we just need to return
# the current item within a batch
return self.loader_batch_item()
# We're out of items within a batch
UpperCAmelCase_ = next(self.iterator )
UpperCAmelCase_ = self.infer(lowerCAmelCase , **self.params )
# We now have a batch of "inferred things".
if self.loader_batch_size is not None:
# Try to infer the size of the batch
if isinstance(lowerCAmelCase , torch.Tensor ):
UpperCAmelCase_ = processed
else:
UpperCAmelCase_ = list(processed.keys() )[0]
UpperCAmelCase_ = processed[key]
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = len(lowerCAmelCase )
else:
UpperCAmelCase_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCAmelCase_ = observed_batch_size
# Setting internal index to unwrap the batch
UpperCAmelCase_ = processed
UpperCAmelCase_ = 0
return self.loader_batch_item()
else:
# We're not unrolling batches
return processed
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def __init__( self , loader , infer , params , loader_batch_size=None ):
        super().__init__(loader , infer , params )
def __iter__( self ):
UpperCAmelCase_ = iter(self.loader )
UpperCAmelCase_ = None
return self
def A__ ( self ):
if self.subiterator is None:
UpperCAmelCase_ = self.infer(next(self.iterator ) , **self.params )
try:
# Try to return next item
UpperCAmelCase_ = next(self.subiterator )
except StopIteration:
            # When a preprocess iterator ends, we can start looking at the next item.
            # ChunkIterator will keep feeding until ALL elements of the iterator
            # have created their subiterator and have been iterated through.
#
# Another way to look at it, is we're basically flattening lists of lists
# into a single list, but with generators
UpperCAmelCase_ = self.infer(next(self.iterator ) , **self.params )
UpperCAmelCase_ = next(self.subiterator )
return processed
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __iter__( self ):
UpperCAmelCase_ = iter(self.loader )
return self
def A__ ( self ):
# Extremely similar to PipelineIterator in its unpacking mechanism
# BUT, we have an extra required item which is the presence of `is_last`
# That is because everything is flattened by `PipelineChunkIterator` we
# need to keep track of how to regroup here in the original `process`
# boundaries so that `process` and `postprocess` see the same data.
# This iterator accumulates items (possibly while unbatching) until it
        # hits an `is_last` and then just passes it on to the caller.
UpperCAmelCase_ = False
UpperCAmelCase_ = []
if self._loader_batch_index is not None and self._loader_batch_index < self.loader_batch_size:
while self._loader_batch_index < self.loader_batch_size:
UpperCAmelCase_ = self.loader_batch_item()
UpperCAmelCase_ = item.pop("is_last" )
accumulator.append(lowerCAmelCase )
if is_last:
return accumulator
while not is_last:
UpperCAmelCase_ = self.infer(next(self.iterator ) , **self.params )
if self.loader_batch_size is not None:
if isinstance(lowerCAmelCase , torch.Tensor ):
UpperCAmelCase_ = processed
else:
UpperCAmelCase_ = list(processed.keys() )[0]
UpperCAmelCase_ = processed[key]
if isinstance(lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = len(lowerCAmelCase )
else:
UpperCAmelCase_ = first_tensor.shape[0]
if 0 < observed_batch_size < self.loader_batch_size:
# could be last batch so we can't unroll as many
# elements.
UpperCAmelCase_ = observed_batch_size
UpperCAmelCase_ = processed
UpperCAmelCase_ = 0
while self._loader_batch_index < self.loader_batch_size:
UpperCAmelCase_ = self.loader_batch_item()
UpperCAmelCase_ = item.pop("is_last" )
accumulator.append(lowerCAmelCase )
if is_last:
return accumulator
else:
UpperCAmelCase_ = processed
UpperCAmelCase_ = item.pop("is_last" )
accumulator.append(lowerCAmelCase )
return accumulator
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def __init__( self , dataset , key ):
UpperCAmelCase_ = dataset
UpperCAmelCase_ = key
def __len__( self ):
return len(self.dataset )
    def __getitem__( self , i ):
return self.dataset[i][self.key]
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def __init__( self , dataset , keya , keyb ):
        UpperCAmelCase_ = dataset
        UpperCAmelCase_ = keya
        UpperCAmelCase_ = keyb
def __len__( self ):
return len(self.dataset )
    def __getitem__( self , i ):
        return {"text": self.dataset[i][self.keya], "text_pair": self.dataset[i][self.keyb]}
| 711 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
_import_structure = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_herbert_fast"] = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
def depth_first_search(grid , row , col , visit ) -> int:
    row_length, col_length = len(grid ), len(grid[0] )
    if (
        min(row , col ) < 0
        or row == row_length
        or col == col_length
        or (row, col) in visit
        or grid[row][col] == 1
    ):
        return 0
    if row == row_length - 1 and col == col_length - 1:
        return 1
    visit.add((row, col) )
    count = 0
    count += depth_first_search(grid , row + 1 , col , visit )
    count += depth_first_search(grid , row - 1 , col , visit )
    count += depth_first_search(grid , row , col + 1 , visit )
    count += depth_first_search(grid , row , col - 1 , visit )
    visit.remove((row, col) )
    return count
if __name__ == "__main__":
import doctest
doctest.testmod()
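# Minimal usage sketch (added; grid layout assumed for illustration): a 3x3
# grid whose centre cell is blocked has exactly two simple paths from the
# top-left to the bottom-right corner, one around each side of the obstacle.
if __name__ == "__main__":
    example_grid = [[0, 0, 0], [0, 1, 0], [0, 0, 0]]
    print(depth_first_search(example_grid, 0, 0, set()))  # prints 2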
| 712 |
import math
def sieve(n ) -> list[int]:
    prime = []
    start = 2
    end = int(math.sqrt(n ) )  # Size of every segment
    temp = [True] * (end + 1)
    in_prime = []
    while start <= end:
        if temp[start] is True:
            in_prime.append(start )
            for i in range(start * start , end + 1 , start ):
                temp[i] = False
        start += 1
    prime += in_prime
    low = end + 1
    high = min(2 * end , n )
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            t = math.floor(low / each ) * each
            if t < low:
                t += each
            for j in range(t , high + 1 , each ):
                temp[j - low] = False
        for j in range(len(temp ) ):
            if temp[j] is True:
                prime.append(j + low )
        low = high + 1
        high = min(high + end , n )
    return prime
print(sieve(10**6))
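# Quick sanity check (added for illustration): the segmented sieve agrees
# with the list of primes below 30.
assert sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]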
| 23 | 0 |
import os
import tempfile
import unittest
from transformers.models.marian.convert_marian_tatoeba_to_pytorch import DEFAULT_REPO, TatoebaConverter
from transformers.testing_utils import slow
from transformers.utils import cached_property
@unittest.skipUnless(os.path.exists(DEFAULT_REPO ), "Tatoeba directory does not exist." )
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
UpperCAmelCase_ = tempfile.mkdtemp()
return TatoebaConverter(save_dir=lowerCAmelCase )
@slow
def A__ ( self ):
self.resolver.convert_models(["heb-eng"] )
@slow
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.resolver.write_model_card("opus-mt-he-en" , dry_run=lowerCAmelCase )
assert mmeta["long_pair"] == "heb-eng"
| 713 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : torch.FloatTensor
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ):
UpperCAmelCase_ = {}
def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return processors
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
self.set_attn_processor(AttnProcessor() )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 23 | 0 |
import inspect
import unittest
import warnings
from transformers import DeiTConfig
from transformers.models.auto import get_values
from transformers.testing_utils import (
require_accelerate,
require_torch,
require_torch_gpu,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
DeiTModel,
)
from transformers.models.deit.modeling_deit import DEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DeiTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=30 , lowerCAmelCase=2 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=32 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=10 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=None , lowerCAmelCase=2 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = image_size
UpperCAmelCase_ = patch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = scope
UpperCAmelCase_ = encoder_stride
        # in DeiT, the seq length equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
UpperCAmelCase_ = (image_size // patch_size) ** 2
UpperCAmelCase_ = num_patches + 2
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return DeiTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = DeiTModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = DeiTForMaskedImageModeling(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = DeiTForMaskedImageModeling(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.type_sequence_label_size
UpperCAmelCase_ = DeiTForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
UpperCAmelCase_ = 1
UpperCAmelCase_ = DeiTForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def A__ ( self ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
        config , pixel_values , labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = (
(
DeiTModel,
DeiTForImageClassification,
DeiTForImageClassificationWithTeacher,
DeiTForMaskedImageModeling,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : Optional[Any] = (
{
'feature-extraction': DeiTModel,
'image-classification': (DeiTForImageClassification, DeiTForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Dict = False
def A__ ( self ):
UpperCAmelCase_ = DeiTModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="DeiT does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase=False ):
UpperCAmelCase_ = super()._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if return_labels:
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def A__ ( self ):
if not self.model_tester.is_training:
return
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
# DeiTForImageClassificationWithTeacher supports inference-only
if (
                model_class in get_values(MODEL_MAPPING )
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
UpperCAmelCase_ = model(**lowerCAmelCase ).loss
loss.backward()
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
UpperCAmelCase_ = False
UpperCAmelCase_ = True
for model_class in self.all_model_classes:
            if model_class in get_values(MODEL_MAPPING ) or not model_class.supports_gradient_checkpointing:
continue
# DeiTForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "DeiTForImageClassificationWithTeacher":
continue
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.gradient_checkpointing_enable()
model.to(lowerCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
UpperCAmelCase_ = model(**lowerCAmelCase ).loss
loss.backward()
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
                    *get_values(MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING ),
                    *get_values(MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING ),
]
or model_class.__name__ == "DeiTForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ):
UpperCAmelCase_ = problem_type["title"]
UpperCAmelCase_ = problem_type["num_labels"]
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.train()
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase , return_labels=lowerCAmelCase )
if problem_type["num_labels"] > 1:
UpperCAmelCase_ = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
UpperCAmelCase_ = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size.", which is a symptom that something is wrong in the regression problem.
                    # See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=lowerCAmelCase ) as warning_list:
UpperCAmelCase_ = model(**lowerCAmelCase ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def A__ ( self ):
for model_name in DEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = DeiTModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
def snake_case__ ( ) -> Any:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return (
DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224" )
if is_vision_available()
else None
)
@slow
def A__ ( self ):
UpperCAmelCase_ = DeiTForImageClassificationWithTeacher.from_pretrained("facebook/deit-base-distilled-patch16-224" ).to(
lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([-1.0266, 0.1912, -1.2861] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
@slow
@require_accelerate
@require_torch_gpu
def A__ ( self ):
UpperCAmelCase_ = DeiTModel.from_pretrained(
"facebook/deit-base-distilled-patch16-224" , torch_dtype=torch.floataa , device_map="auto" )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" )
UpperCAmelCase_ = inputs.pixel_values.to(lowerCAmelCase )
# forward pass to make sure inference works in fp16
with torch.no_grad():
UpperCAmelCase_ = model(lowerCAmelCase )
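# Illustrative sketch (added; helper name is hypothetical): how the
# problem-type test above reshapes labels. Multi-label targets get one
# column per label and are cast to the dtype the corresponding loss expects.
def _labels_for_problem_type(labels, num_labels, dtype):
    if num_labels > 1:
        labels = labels.unsqueeze(1).repeat(1, num_labels)
    return labels.to(dtype)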
| 714 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
def factorial(num ) -> int:
    fact = 1
    for i in range(1 , num + 1 ):
        fact *= i
    return fact


def split_and_add(number ) -> int:
    sum_of_digits = 0
    while number > 0:
        last_digit = number % 10
        sum_of_digits += last_digit
        number = number // 10  # Removing the last_digit from the given number
    return sum_of_digits


def solution(num = 100 ) -> int:
    fact = factorial(num )
    result = split_and_add(fact )
    return result
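# Illustrative check (added; values computed by hand): 10! = 3628800 and its
# digits sum to 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27.
assert solution(10) == 27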
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 715 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = 'xlm-roberta'
def __init__( self , lowerCAmelCase=3_0522 , lowerCAmelCase=768 , lowerCAmelCase=12 , lowerCAmelCase=12 , lowerCAmelCase=3072 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=1e-1_2 , lowerCAmelCase=1 , lowerCAmelCase=0 , lowerCAmelCase=2 , lowerCAmelCase="absolute" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(pad_token_id=lowerCAmelCase , bos_token_id=lowerCAmelCase , eos_token_id=lowerCAmelCase , **lowerCAmelCase )
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = layer_norm_eps
UpperCAmelCase_ = position_embedding_type
UpperCAmelCase_ = use_cache
UpperCAmelCase_ = classifier_dropout
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def A__ ( self ):
if self.task == "multiple-choice":
UpperCAmelCase_ = {0: "batch", 1: "choice", 2: "sequence"}
else:
UpperCAmelCase_ = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
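# Illustrative sketch (added; the constant name is hypothetical): the dynamic
# axes the ONNX config property above produces for the default task, i.e.
# anything other than multiple-choice.
EXPECTED_DEFAULT_ONNX_INPUTS = {
    "input_ids": {0: "batch", 1: "sequence"},
    "attention_mask": {0: "batch", 1: "sequence"},
}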
| 23 | 0 |
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import datasets
import datasets.config
from .utils import require_beam
class lowerCamelCase ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def A__ ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"content": datasets.Value("string" )} ) , supervised_keys=lowerCAmelCase , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return [datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_dummy_examples()} )]
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )
class lowerCamelCase ( datasets.BeamBasedBuilder ):
'''simple docstring'''
def A__ ( self ):
return datasets.DatasetInfo(
features=datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) , supervised_keys=lowerCAmelCase , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
return [
datasets.SplitGenerator(name=datasets.Split.TRAIN , gen_kwargs={"examples": get_test_nested_examples()} )
]
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
import apache_beam as beam
return pipeline | "Load Examples" >> beam.Create(lowerCAmelCase )
def snake_case__ ( ) -> Tuple:
return [(i, {"content": content}) for i, content in enumerate(["foo", "bar", "foobar"] )]
def snake_case__ ( ) -> Any:
return [(i, {"a": {"b": [content]}}) for i, content in enumerate(["foo", "bar", "foobar"] )]
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@require_beam
def A__ ( self ):
UpperCAmelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_dummy_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_dummy_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def A__ ( self ):
import apache_beam as beam
UpperCAmelCase_ = beam.io.parquetio.WriteToParquet
UpperCAmelCase_ = len(get_test_dummy_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = DummyBeamDataset(cache_dir=lowerCAmelCase , beam_runner="DirectRunner" )
with patch("apache_beam.io.parquetio.WriteToParquet" ) as write_parquet_mock:
UpperCAmelCase_ = partial(lowerCAmelCase , num_shards=2 )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(
lowerCAmelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00000-of-00002.arrow''' ) ) )
self.assertTrue(
os.path.exists(
os.path.join(
                        lowerCAmelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train-00001-of-00002.arrow''' ) ) )
self.assertDictEqual(builder.info.features , datasets.Features({"content": datasets.Value("string" )} ) )
UpperCAmelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase )
# Order is not preserved when sharding, so we just check that all the elements are there
self.assertListEqual(sorted(dset["train"]["content"] ) , sorted(["foo", "bar", "foobar"] ) )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
@require_beam
def A__ ( self ):
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = DummyBeamDataset(cache_dir=lowerCAmelCase )
self.assertRaises(datasets.builder.MissingBeamOptions , builder.download_and_prepare )
@require_beam
def A__ ( self ):
UpperCAmelCase_ = len(get_test_nested_examples() )
with tempfile.TemporaryDirectory() as tmp_cache_dir:
UpperCAmelCase_ = NestedBeamDataset(cache_dir=lowerCAmelCase , beam_runner="DirectRunner" )
builder.download_and_prepare()
self.assertTrue(
os.path.exists(
os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , f'''{builder.name}-train.arrow''' ) ) )
self.assertDictEqual(
builder.info.features , datasets.Features({"a": datasets.Sequence({"b": datasets.Value("string" )} )} ) )
UpperCAmelCase_ = builder.as_dataset()
self.assertEqual(dset["train"].num_rows , lowerCAmelCase )
self.assertEqual(dset["train"].info.splits["train"].num_examples , lowerCAmelCase )
self.assertDictEqual(dset["train"][0] , get_test_nested_examples()[0][1] )
self.assertDictEqual(
dset["train"][expected_num_examples - 1] , get_test_nested_examples()[expected_num_examples - 1][1] )
self.assertTrue(
os.path.exists(os.path.join(lowerCAmelCase , builder.name , "default" , "0.0.0" , "dataset_info.json" ) ) )
del dset
| 716 |
def binary_recursive(decimal ) -> str:
    decimal = int(decimal )
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal )
    div , mod = divmod(decimal , 2 )
    return binary_recursive(div ) + str(mod )


def main(number ) -> str:
    number = str(number ).strip()
    if not number:
        raise ValueError("No input value was provided" )
    negative = "-" if number.startswith("-" ) else ""
    number = number.lstrip("-" )
    if not number.isnumeric():
        raise ValueError("Input value is not an integer" )
    return f'''{negative}0b{binary_recursive(int(number ) )}'''
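# Illustrative checks (added): the conversion mirrors Python's built-in bin()
# for both positive and negative inputs.
assert main("7") == "0b111"
assert main("-11") == "-0b1011"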
if __name__ == "__main__":
from doctest import testmod
testmod()
| 23 | 0 |
import argparse
import json
from dataclasses import dataclass, field
from functools import partial
from pathlib import Path
from typing import Callable, Dict, List, Tuple
import timm
import torch
import torch.nn as nn
from classy_vision.models.regnet import RegNet, RegNetParams, RegNetYaagf, RegNetYaagf, RegNetYaaagf
from huggingface_hub import cached_download, hf_hub_url
from torch import Tensor
from vissl.models.model_helpers import get_trunk_forward_outputs
from transformers import AutoImageProcessor, RegNetConfig, RegNetForImageClassification, RegNetModel
from transformers.utils import logging
logging.set_verbosity_info()
SCREAMING_SNAKE_CASE = logging.get_logger()
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : nn.Module
lowerCAmelCase_ : List[nn.Module] = field(default_factory=lowercase__ )
lowerCAmelCase_ : list = field(default_factory=lowercase__ )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = len(list(m.modules() ) ) == 1 or isinstance(lowerCAmelCase , nn.Convad ) or isinstance(lowerCAmelCase , nn.BatchNormad )
if has_not_submodules:
self.traced.append(lowerCAmelCase )
def __call__( self , lowerCAmelCase ):
for m in self.module.modules():
self.handles.append(m.register_forward_hook(self._forward_hook ) )
self.module(lowerCAmelCase )
[x.remove() for x in self.handles]
return self
@property
def A__ ( self ):
# check the len of the state_dict keys to see if we have learnable params
return list(filter(lambda lowerCAmelCase : len(list(x.state_dict().keys() ) ) > 0 , self.traced ) )
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : nn.Module
lowerCAmelCase_ : nn.Module
lowerCAmelCase_ : int = 1
lowerCAmelCase_ : List = field(default_factory=lowercase__ )
lowerCAmelCase_ : List = field(default_factory=lowercase__ )
lowerCAmelCase_ : bool = True
def __call__( self , lowerCAmelCase ):
UpperCAmelCase_ = Tracker(self.dest )(lowerCAmelCase ).parametrized
UpperCAmelCase_ = Tracker(self.src )(lowerCAmelCase ).parametrized
UpperCAmelCase_ = list(filter(lambda lowerCAmelCase : type(lowerCAmelCase ) not in self.src_skip , lowerCAmelCase ) )
UpperCAmelCase_ = list(filter(lambda lowerCAmelCase : type(lowerCAmelCase ) not in self.dest_skip , lowerCAmelCase ) )
if len(lowerCAmelCase ) != len(lowerCAmelCase ) and self.raise_if_mismatch:
raise Exception(
f'''Numbers of operations are different. Source module has {len(lowerCAmelCase )} operations while'''
f''' destination module has {len(lowerCAmelCase )}.''' )
for dest_m, src_m in zip(lowerCAmelCase , lowerCAmelCase ):
dest_m.load_state_dict(src_m.state_dict() )
if self.verbose == 1:
print(f'''Transfered from={src_m} to={dest_m}''' )
class lowerCamelCase ( nn.Module ):
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
super().__init__()
UpperCAmelCase_ = []
# - get the stem
feature_blocks.append(("conv1", model.stem) )
# - get all the feature blocks
for k, v in model.trunk_output.named_children():
assert k.startswith("block" ), f'''Unexpected layer name {k}'''
UpperCAmelCase_ = len(lowerCAmelCase ) + 1
feature_blocks.append((f'''res{block_index}''', v) )
UpperCAmelCase_ = nn.ModuleDict(lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
return get_trunk_forward_outputs(
lowerCAmelCase , out_feat_keys=lowerCAmelCase , feature_blocks=self._feature_blocks , )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = x.split("-" )
return x_split[0] + x_split[1] + "_" + "".join(x_split[2:] )
def __getitem__( self , lowerCAmelCase ):
# default to timm!
if x not in self:
UpperCAmelCase_ = self.convert_name_to_timm(lowerCAmelCase )
UpperCAmelCase_ = partial(lambda: (timm.create_model(lowerCAmelCase , pretrained=lowerCAmelCase ).eval(), None) )
else:
UpperCAmelCase_ = super().__getitem__(lowerCAmelCase )
return val
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __getitem__( self , lowerCAmelCase ):
if "seer" in x and "in1k" not in x:
UpperCAmelCase_ = RegNetModel
else:
UpperCAmelCase_ = RegNetForImageClassification
return val
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
for from_key, to_key in keys:
UpperCAmelCase_ = from_state_dict[from_key].clone()
print(f'''Copied key={from_key} to={to_key}''' )
return to_state_dict
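# Minimal usage sketch (added; the key pairs shown are the ones used further
# below for SEER in1k checkpoints) of how manually_copy_vissl_head grafts a
# finetuned classification head onto the converted state dict:
#   keys = [("0.clf.0.weight", "classifier.1.weight"),
#           ("0.clf.0.bias", "classifier.1.bias")]
#   state = manually_copy_vissl_head(vissl_state, our_model.state_dict(), keys)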
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = True , ) -> Optional[Any]:
print(f'''Converting {name}...''' )
with torch.no_grad():
UpperCAmelCase_ , UpperCAmelCase_ = from_model_func()
UpperCAmelCase_ = our_model_func(__SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase_ = ModuleTransfer(src=__SCREAMING_SNAKE_CASE , dest=__SCREAMING_SNAKE_CASE , raise_if_mismatch=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = torch.randn((1, 3, 224, 224) )
module_transfer(__SCREAMING_SNAKE_CASE )
if from_state_dict is not None:
UpperCAmelCase_ = []
# for seer - in1k finetuned we have to manually copy the head
if "seer" in name and "in1k" in name:
UpperCAmelCase_ = [("0.clf.0.weight", "classifier.1.weight"), ("0.clf.0.bias", "classifier.1.bias")]
UpperCAmelCase_ = manually_copy_vissl_head(__SCREAMING_SNAKE_CASE , our_model.state_dict() , __SCREAMING_SNAKE_CASE )
our_model.load_state_dict(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = our_model(__SCREAMING_SNAKE_CASE , output_hidden_states=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = (
our_outputs.logits if isinstance(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) else our_outputs.last_hidden_state
)
UpperCAmelCase_ = from_model(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = from_output[-1] if type(__SCREAMING_SNAKE_CASE ) is list else from_output
    # now since I don't want to use any config files, vissl seer models don't actually have a head, so let's just check the last hidden state
if "seer" in name and "in1k" in name:
UpperCAmelCase_ = our_outputs.hidden_states[-1]
assert torch.allclose(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ), "The model logits don't match the original one."
if push_to_hub:
our_model.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add model" , use_temp_dir=__SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ = 224 if "seer" not in name else 384
# we can use the convnext one
UpperCAmelCase_ = AutoImageProcessor.from_pretrained("facebook/convnext-base-224-22k-1k" , size=__SCREAMING_SNAKE_CASE )
image_processor.push_to_hub(
repo_path_or_name=save_directory / name , commit_message="Add image processor" , use_temp_dir=__SCREAMING_SNAKE_CASE , )
print(f'''Pushed {name}''' )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = True ) -> int:
UpperCAmelCase_ = "imagenet-1k-id2label.json"
UpperCAmelCase_ = 1000
UpperCAmelCase_ = (1, num_labels)
UpperCAmelCase_ = "huggingface/label-files"
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = json.load(open(cached_download(hf_hub_url(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , repo_type="dataset" ) ) , "r" ) )
UpperCAmelCase_ = {int(__SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
UpperCAmelCase_ = idalabel
UpperCAmelCase_ = {v: k for k, v in idalabel.items()}
UpperCAmelCase_ = partial(__SCREAMING_SNAKE_CASE , num_labels=__SCREAMING_SNAKE_CASE , idalabel=__SCREAMING_SNAKE_CASE , labelaid=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {
"regnet-x-002": ImageNetPreTrainedConfig(
depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 , layer_type="x" ),
"regnet-x-004": ImageNetPreTrainedConfig(
depths=[1, 2, 7, 12] , hidden_sizes=[32, 64, 160, 384] , groups_width=16 , layer_type="x" ),
"regnet-x-006": ImageNetPreTrainedConfig(
depths=[1, 3, 5, 7] , hidden_sizes=[48, 96, 240, 528] , groups_width=24 , layer_type="x" ),
"regnet-x-008": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 5] , hidden_sizes=[64, 128, 288, 672] , groups_width=16 , layer_type="x" ),
"regnet-x-016": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 2] , hidden_sizes=[72, 168, 408, 912] , groups_width=24 , layer_type="x" ),
"regnet-x-032": ImageNetPreTrainedConfig(
depths=[2, 6, 15, 2] , hidden_sizes=[96, 192, 432, 1008] , groups_width=48 , layer_type="x" ),
"regnet-x-040": ImageNetPreTrainedConfig(
depths=[2, 5, 14, 2] , hidden_sizes=[80, 240, 560, 1360] , groups_width=40 , layer_type="x" ),
"regnet-x-064": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 392, 784, 1624] , groups_width=56 , layer_type="x" ),
"regnet-x-080": ImageNetPreTrainedConfig(
depths=[2, 5, 15, 1] , hidden_sizes=[80, 240, 720, 1920] , groups_width=120 , layer_type="x" ),
"regnet-x-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 , layer_type="x" ),
"regnet-x-160": ImageNetPreTrainedConfig(
depths=[2, 6, 13, 1] , hidden_sizes=[256, 512, 896, 2048] , groups_width=128 , layer_type="x" ),
"regnet-x-320": ImageNetPreTrainedConfig(
depths=[2, 7, 13, 1] , hidden_sizes=[336, 672, 1344, 2520] , groups_width=168 , layer_type="x" ),
# y variant
"regnet-y-002": ImageNetPreTrainedConfig(depths=[1, 1, 4, 7] , hidden_sizes=[24, 56, 152, 368] , groups_width=8 ),
"regnet-y-004": ImageNetPreTrainedConfig(
depths=[1, 3, 6, 6] , hidden_sizes=[48, 104, 208, 440] , groups_width=8 ),
"regnet-y-006": ImageNetPreTrainedConfig(
depths=[1, 3, 7, 4] , hidden_sizes=[48, 112, 256, 608] , groups_width=16 ),
"regnet-y-008": ImageNetPreTrainedConfig(
depths=[1, 3, 8, 2] , hidden_sizes=[64, 128, 320, 768] , groups_width=16 ),
"regnet-y-016": ImageNetPreTrainedConfig(
depths=[2, 6, 17, 2] , hidden_sizes=[48, 120, 336, 888] , groups_width=24 ),
"regnet-y-032": ImageNetPreTrainedConfig(
depths=[2, 5, 13, 1] , hidden_sizes=[72, 216, 576, 1512] , groups_width=24 ),
"regnet-y-040": ImageNetPreTrainedConfig(
depths=[2, 6, 12, 2] , hidden_sizes=[128, 192, 512, 1088] , groups_width=64 ),
"regnet-y-064": ImageNetPreTrainedConfig(
depths=[2, 7, 14, 2] , hidden_sizes=[144, 288, 576, 1296] , groups_width=72 ),
"regnet-y-080": ImageNetPreTrainedConfig(
depths=[2, 4, 10, 1] , hidden_sizes=[168, 448, 896, 2016] , groups_width=56 ),
"regnet-y-120": ImageNetPreTrainedConfig(
depths=[2, 5, 11, 1] , hidden_sizes=[224, 448, 896, 2240] , groups_width=112 ),
"regnet-y-160": ImageNetPreTrainedConfig(
depths=[2, 4, 11, 1] , hidden_sizes=[224, 448, 1232, 3024] , groups_width=112 ),
"regnet-y-320": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
# models created by SEER -> https://arxiv.org/abs/2202.08360
"regnet-y-320-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer": RegNetConfig(depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer": RegNetConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer": RegNetConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 1_1110, 2_8280] , groups_width=1010 ),
# finetuned on imagenet
"regnet-y-320-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[232, 696, 1392, 3712] , groups_width=232 ),
"regnet-y-640-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 5, 12, 1] , hidden_sizes=[328, 984, 1968, 4920] , groups_width=328 ),
"regnet-y-1280-seer-in1k": ImageNetPreTrainedConfig(
depths=[2, 7, 17, 1] , hidden_sizes=[528, 1056, 2904, 7392] , groups_width=264 ),
"regnet-y-2560-seer-in1k": ImageNetPreTrainedConfig(
depths=[3, 7, 16, 1] , hidden_sizes=[640, 1696, 2544, 5088] , groups_width=640 ),
"regnet-y-10b-seer-in1k": ImageNetPreTrainedConfig(
        depths=[2, 7, 17, 1] , hidden_sizes=[2020, 4040, 11110, 28280] , groups_width=1010 ),
}
UpperCAmelCase_ = NameToOurModelFuncMap()
UpperCAmelCase_ = NameToFromModelFuncMap()
# add seer weights logic
def load_using_classy_vision(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple[nn.Module, Dict]:
UpperCAmelCase_ = torch.hub.load_state_dict_from_url(__SCREAMING_SNAKE_CASE , model_dir=str(__SCREAMING_SNAKE_CASE ) , map_location="cpu" )
UpperCAmelCase_ = model_func()
# check if we have a head, if yes add it
UpperCAmelCase_ = files["classy_state_dict"]["base_model"]["model"]
UpperCAmelCase_ = model_state_dict["trunk"]
model.load_state_dict(__SCREAMING_SNAKE_CASE )
return model.eval(), model_state_dict["heads"]
# pretrained
UpperCAmelCase_ = partial(
__SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet32d/seer_regnet32gf_model_iteration244000.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
__SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet64/seer_regnet64gf_model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
__SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/swav_ig1b_regnet128Gf_cnstant_bs32_node16_sinkhorn10_proto16k_syncBN64_warmup8k/model_final_checkpoint_phase0.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ = partial(
__SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_regnet10B/model_iteration124500_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
# IN1K finetuned
UpperCAmelCase_ = partial(
__SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet32_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
__SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet64_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaagf() ) , )
UpperCAmelCase_ = partial(
__SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_regnet128_finetuned_in1k_model_final_checkpoint_phase78.torch" , lambda: FakeRegNetVisslWrapper(RegNetYaaagf() ) , )
UpperCAmelCase_ = partial(
__SCREAMING_SNAKE_CASE , "https://dl.fbaipublicfiles.com/vissl/model_zoo/seer_finetuned/seer_10b_finetuned_in1k_model_phase28_conso.torch" , lambda: FakeRegNetVisslWrapper(
        RegNet(RegNetParams(depth=27 , group_width=1010 , w_0=1744 , w_a=620.83 , w_m=2.52 ) ) ) , )
if model_name:
convert_weight_and_push(
__SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , names_to_config[model_name] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
else:
for model_name, config in names_to_config.items():
convert_weight_and_push(
__SCREAMING_SNAKE_CASE , names_to_from_model_map[model_name] , names_to_ours_model_map[model_name] , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , )
return config, expected_shape
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--model_name",
default=None,
type=str,
help=(
"The name of the model you wish to convert, it must be one of the supported regnet* architecture,"
" currently: regnetx-*, regnety-*. If `None`, all of them will the converted."
),
)
parser.add_argument(
"--pytorch_dump_folder_path",
default=None,
type=Path,
required=True,
help="Path to the output PyTorch model directory.",
)
parser.add_argument(
"--push_to_hub",
default=True,
type=bool,
required=False,
help="If True, push model and image processor to the hub.",
)
SCREAMING_SNAKE_CASE = parser.parse_args()
SCREAMING_SNAKE_CASE = args.pytorch_dump_folder_path
pytorch_dump_folder_path.mkdir(exist_ok=True, parents=True)
convert_weights_and_push(pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 717 |
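# The conversion script above wires each SEER checkpoint to a zero-argument loader by
# currying `load_using_classy_vision` with `functools.partial`. A minimal sketch of that
# deferred-loading pattern follows; the URLs and the model factory are illustrative
# stand-ins, not the real VISSL endpoints.
from functools import partial


def load_checkpoint(url, build_model):
    """Pretend to fetch a state dict for `url` and load it into a freshly built model."""
    print(f"would fetch weights from {url}")  # real code: torch.hub.load_state_dict_from_url(url)
    return build_model()


def build_toy_model():
    return {"kind": "toy-model"}  # stand-in for an nn.Module factory


# Registering loaders: nothing is downloaded until an entry is actually called.
loaders = {
    "toy-32gf": partial(load_checkpoint, "https://example.com/toy32.torch", build_toy_model),
    "toy-64gf": partial(load_checkpoint, "https://example.com/toy64.torch", build_toy_model),
}
model = loaders["toy-32gf"]()  # triggers the (mock) fetch and build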
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase ).to(lowerCAmelCase )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = model(input_ids.to(lowerCAmelCase ) , labels=labels.to(lowerCAmelCase ) ).loss
UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 23 | 0 |
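# The mT5 test above turns the mean cross-entropy loss back into a summed log-likelihood
# by multiplying by the label length: `-(labels.shape[-1] * loss.item())`. A sketch of
# that arithmetic with made-up numbers (not the real test output):
mean_loss = 21.23        # mean cross-entropy over the label tokens (illustrative)
num_label_tokens = 4
score = -(num_label_tokens * mean_loss)   # summed log-likelihood of the labels
assert round(score, 2) == -84.92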
import re
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]" , __SCREAMING_SNAKE_CASE )]
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
    UpperCAmelCase_ = split_input(__SCREAMING_SNAKE_CASE )
return "".join(
["".join([char.capitalize() for char in sub_str] ) for sub_str in string_split] )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
try:
UpperCAmelCase_ = split_input(__SCREAMING_SNAKE_CASE )
if upper:
UpperCAmelCase_ = "".join(
[
separator.join([char.upper() for char in sub_str] )
for sub_str in string_split
] )
else:
UpperCAmelCase_ = "".join(
[
separator.join([char.lower() for char in sub_str] )
for sub_str in string_split
] )
return res_str
except IndexError:
return "not valid string"
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
return to_simple_case(__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> str:
try:
UpperCAmelCase_ = to_simple_case(__SCREAMING_SNAKE_CASE )
return res_str[0].lower() + res_str[1:]
except IndexError:
return "not valid string"
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
return to_complex_case(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , "_" )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
return to_complex_case(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , "-" )
if __name__ == "__main__":
__import__("doctest").testmod()
| 718 |
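# The converters above are heavily obfuscated; a readable reference sketch of the same
# idea (split on non-alphanumerics, then re-join with the chosen casing). This is a
# simplified variant that joins all word groups uniformly rather than per sub-string.
import re


def split_words(text):
    """Split into word groups on any character that is not alphanumeric or whitespace."""
    return [chunk.split() for chunk in re.split(r"[^ a-zA-Z0-9\s]", text)]


def to_pascal_case(text):
    return "".join(word.capitalize() for group in split_words(text) for word in group)


def to_snake_case(text):
    return "_".join(word.lower() for group in split_words(text) for word in group)


assert to_pascal_case("hello, wonderful world") == "HelloWonderfulWorld"
assert to_snake_case("hello, wonderful world") == "hello_wonderful_world"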
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
UpperCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 23 | 0 |
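# The pair of helpers above implement shift-and-add (Russian peasant) multiplication,
# plain and modular. A commented sketch with a quick self-check:
def multiply(a, b):
    """Multiply two non-negative integers using only shifts and additions."""
    result = 0
    while b > 0:
        if b & 1:      # lowest bit of b is set -> add the current a
            result += a
        a += a         # double a (equivalent to a << 1)
        b >>= 1        # halve b, dropping the lowest bit
    return result


def multiply_mod(a, b, m):
    """The same shift-and-add loop, reducing modulo m at every step."""
    result = 0
    a %= m
    while b > 0:
        if b & 1:
            result = (result + a) % m
        a = (a + a) % m
        b >>= 1
    return result


assert multiply(19, 27) == 19 * 27
assert multiply_mod(19, 27, 7) == (19 * 27) % 7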
'''simple docstring'''
import inspect
import os
import unittest
from pathlib import Path
import torch
import accelerate
from accelerate.test_utils import execute_subprocess_async
from accelerate.test_utils.testing import run_command
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[int] = inspect.getfile(accelerate.test_utils )
lowerCAmelCase_ : Union[str, Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ['scripts', 'test_cli.py'] )
lowerCAmelCase_ : Dict = ['accelerate', 'launch']
lowerCAmelCase_ : int = Path.home() / '.cache/huggingface/accelerate'
lowerCAmelCase_ : str = 'default_config.yaml'
lowerCAmelCase_ : Dict = config_folder / config_file
lowerCAmelCase_ : Tuple = config_folder / '_default_config.yaml'
lowerCAmelCase_ : Dict = Path('tests/test_configs' )
@classmethod
def A__ ( cls ):
if cls.config_path.is_file():
cls.config_path.rename(cls.changed_path )
@classmethod
def A__ ( cls ):
if cls.changed_path.is_file():
cls.changed_path.rename(cls.config_path )
def A__ ( self ):
UpperCAmelCase_ = self.base_cmd
if torch.cuda.is_available() and (torch.cuda.device_count() > 1):
cmd += ["--multi_gpu"]
execute_subprocess_async(cmd + [self.test_file_path] , env=os.environ.copy() )
def A__ ( self ):
for config in sorted(self.test_config_path.glob("**/*.yaml" ) ):
with self.subTest(config_file=lowerCAmelCase ):
execute_subprocess_async(
self.base_cmd + ["--config_file", str(lowerCAmelCase ), self.test_file_path] , env=os.environ.copy() )
def A__ ( self ):
execute_subprocess_async(["accelerate", "test"] , env=os.environ.copy() )
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Any = 'test-tpu'
lowerCAmelCase_ : List[Any] = 'us-central1-a'
lowerCAmelCase_ : int = 'ls'
lowerCAmelCase_ : Optional[int] = ['accelerate', 'tpu-config']
lowerCAmelCase_ : str = 'cd /usr/share'
lowerCAmelCase_ : Optional[int] = 'tests/test_samples/test_command_file.sh'
lowerCAmelCase_ : List[str] = 'Running gcloud compute tpus tpu-vm ssh'
def A__ ( self ):
UpperCAmelCase_ = run_command(
self.cmd
+ ["--command", self.command, "--tpu_zone", self.tpu_zone, "--tpu_name", self.tpu_name, "--debug"] , return_stdout=lowerCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , lowerCAmelCase , )
def A__ ( self ):
UpperCAmelCase_ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command",
self.command,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=lowerCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , lowerCAmelCase , )
def A__ ( self ):
UpperCAmelCase_ = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--debug"] , return_stdout=lowerCAmelCase )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , lowerCAmelCase , )
def A__ ( self ):
UpperCAmelCase_ = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--command", self.command, "--debug"] , return_stdout=lowerCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls --worker all''' , lowerCAmelCase , )
def A__ ( self ):
UpperCAmelCase_ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--command",
self.command,
"--command",
"echo \"Hello World\"",
"--debug",
] , return_stdout=lowerCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; ls; echo "Hello World" --worker all''' , lowerCAmelCase , )
def A__ ( self ):
UpperCAmelCase_ = run_command(
self.cmd
+ ["--config_file", "tests/test_configs/latest.yaml", "--command_file", self.command_file, "--debug"] , return_stdout=lowerCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , lowerCAmelCase , )
def A__ ( self ):
UpperCAmelCase_ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/0_12_0.yaml",
"--command_file",
self.command_file,
"--tpu_zone",
self.tpu_zone,
"--tpu_name",
self.tpu_name,
"--debug",
] , return_stdout=lowerCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; echo "hello world"; echo "this is a second command" --worker all''' , lowerCAmelCase , )
def A__ ( self ):
UpperCAmelCase_ = run_command(
self.cmd + ["--config_file", "tests/test_configs/latest.yaml", "--install_accelerate", "--debug"] , return_stdout=lowerCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate -U; echo "hello world"; echo "this is a second command" --worker all''' , lowerCAmelCase , )
def A__ ( self ):
UpperCAmelCase_ = run_command(
self.cmd
+ [
"--config_file",
"tests/test_configs/latest.yaml",
"--install_accelerate",
"--accelerate_version",
"12.0.0",
"--debug",
] , return_stdout=lowerCAmelCase , )
self.assertIn(
f'''{self.gcloud} test-tpu --zone us-central1-a --command {self.base_output}; pip install accelerate==12.0.0; echo "hello world"; echo "this is a second command" --worker all''' , lowerCAmelCase , )
| 719 |
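# The CLI tests above assemble argv lists (`self.cmd + ["--config_file", ...]`) and
# capture stdout to assert on the composed gcloud command. The capture itself reduces
# to one `subprocess.run` call; a minimal sketch:
import subprocess
import sys


def run_command(cmd):
    """Run an argv-style command and return its stdout, raising on a non-zero exit."""
    return subprocess.run(cmd, capture_output=True, text=True, check=True).stdout


base_cmd = [sys.executable, "-c", "print('hello from a child process')"]
assert "hello" in run_command(base_cmd)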
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> List[List[ImageInput]]:
    if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(__SCREAMING_SNAKE_CASE[0] , (list, tuple) ) and is_valid_image(__SCREAMING_SNAKE_CASE[0][0] ):
        return __SCREAMING_SNAKE_CASE
    elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(__SCREAMING_SNAKE_CASE[0] ):
        return [__SCREAMING_SNAKE_CASE]
    elif is_valid_image(__SCREAMING_SNAKE_CASE ):
        return [[__SCREAMING_SNAKE_CASE]]
    raise ValueError(f'''Could not make batched video from {__SCREAMING_SNAKE_CASE}''' )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
| 23 | 0 |
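# The `make_batched` helper above normalizes a single frame, a video (list of frames),
# or a batch of videos into a uniform list-of-lists. A standalone sketch of the same
# dispatch, with a toy validity check standing in for the real `is_valid_image`:
import numpy as np


def is_valid_image(obj):
    # Toy stand-in: the real check also accepts PIL images and torch/tf tensors.
    return isinstance(obj, np.ndarray)


def make_batched(videos):
    """Return a list of videos, each video being a list of frames."""
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos                   # already a batch of videos
    if isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]                 # a single video -> a batch of one
    if is_valid_image(videos):
        return [[videos]]               # a single frame -> a one-frame video
    raise ValueError(f"Could not make batched video from {videos}")


frame = np.zeros((8, 8, 3), dtype=np.uint8)
assert len(make_batched(frame)) == 1                # [[frame]]
assert len(make_batched([frame, frame])[0]) == 2    # one video with two frames
assert len(make_batched([[frame], [frame]])) == 2   # two one-frame videos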
from __future__ import annotations
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> list[int]:
UpperCAmelCase_ = 2
UpperCAmelCase_ = []
    while i * i <= __SCREAMING_SNAKE_CASE:
        if __SCREAMING_SNAKE_CASE % i:
            i += 1
        else:
            __SCREAMING_SNAKE_CASE //= i
            factors.append(i)
    if __SCREAMING_SNAKE_CASE > 1:
factors.append(__SCREAMING_SNAKE_CASE )
return factors
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
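# A cleaned-up version of the trial-division factorization above, with the intended
# append targets (the current factor `i`, then the leftover prime) spelled out:
def prime_factors(n):
    """Return the prime factorization of n in non-decreasing order."""
    i = 2
    factors = []
    while i * i <= n:
        if n % i:        # i does not divide n -> try the next candidate
            i += 1
        else:
            n //= i      # strip one copy of the factor i
            factors.append(i)
    if n > 1:            # whatever remains is itself prime
        factors.append(n)
    return factors


assert prime_factors(360) == [2, 2, 2, 3, 3, 5]
assert prime_factors(97) == [97]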
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 0
while number > 0:
UpperCAmelCase_ = number % 10
sum_of_digits += last_digit
UpperCAmelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case__ ( __SCREAMING_SNAKE_CASE = 100 ) -> int:
UpperCAmelCase_ = factorial(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = split_and_add(__SCREAMING_SNAKE_CASE )
return result
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 23 | 0 |
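# The snippet above is Project Euler problem 20 (digit sum of 100!). The same result
# falls out of `math.factorial` plus a digit sum over the decimal string, shown here
# as a cross-check:
import math


def factorial_digit_sum(num=100):
    """Sum of the decimal digits of num!."""
    return sum(int(digit) for digit in str(math.factorial(num)))


assert factorial_digit_sum(10) == 27    # 10! = 3628800 -> 3+6+2+8+8+0+0
assert factorial_digit_sum(100) == 648  # the Project Euler 20 answer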
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
"configuration_megatron_bert": ["MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegatronBertConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegatronBertForCausalLM",
"MegatronBertForMaskedLM",
"MegatronBertForMultipleChoice",
"MegatronBertForNextSentencePrediction",
"MegatronBertForPreTraining",
"MegatronBertForQuestionAnswering",
"MegatronBertForSequenceClassification",
"MegatronBertForTokenClassification",
"MegatronBertModel",
"MegatronBertPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 721 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
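# Both `__init__` modules above defer heavy imports through `_LazyModule`. The same
# effect can be sketched with a module-level `__getattr__` (PEP 562), which resolves a
# name on first access. The map and submodule names below are illustrative, not the
# real transformers layout.
import importlib

_IMPORT_MAP = {
    "HeavyModel": ".modeling",          # attribute -> submodule that defines it
    "HeavyTokenizer": ".tokenization",
}


def __getattr__(name):
    try:
        module = importlib.import_module(_IMPORT_MAP[name], __name__)
    except KeyError:
        raise AttributeError(f"module {__name__!r} has no attribute {name!r}") from None
    return getattr(module, name)        # imported (and cached by Python) only now


def __dir__():
    return sorted(list(globals()) + list(_IMPORT_MAP))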
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> tuple:
    return (__SCREAMING_SNAKE_CASE["data"], __SCREAMING_SNAKE_CASE["target"])
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> XGBClassifier:
UpperCAmelCase_ = XGBClassifier()
classifier.fit(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
return classifier
def snake_case__ ( ) -> None:
UpperCAmelCase_ = load_iris()
UpperCAmelCase_ , UpperCAmelCase_ = data_handling(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = train_test_split(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , test_size=0.25 )
UpperCAmelCase_ = iris["target_names"]
# Create an XGBoost Classifier from the training data
UpperCAmelCase_ = xgboost(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , display_labels=__SCREAMING_SNAKE_CASE , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 700 |
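# The pipeline above (load, split, fit, plot a normalized confusion matrix) does not
# depend on XGBoost specifically. A minimal variant using scikit-learn's built-in tree
# classifier, assuming only scikit-learn is installed:
from sklearn.datasets import load_iris
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier


def main():
    data = load_iris()
    x_train, x_test, y_train, y_test = train_test_split(
        data["data"], data["target"], test_size=0.25, random_state=0
    )
    clf = DecisionTreeClassifier(random_state=0).fit(x_train, y_train)
    # Row-normalized counts, the textual analogue of the plot above.
    print(confusion_matrix(y_test, clf.predict(x_test), normalize="true"))


if __name__ == "__main__":
    main()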
import argparse
import torch
from transformers import MobileBertConfig, MobileBertForPreTraining, load_tf_weights_in_mobilebert
from transformers.utils import logging
logging.set_verbosity_info()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> List[Any]:
# Initialise PyTorch model
UpperCAmelCase_ = MobileBertConfig.from_json_file(__SCREAMING_SNAKE_CASE )
print(f'''Building PyTorch model from configuration: {config}''' )
UpperCAmelCase_ = MobileBertForPreTraining(__SCREAMING_SNAKE_CASE )
# Load weights from tf checkpoint
UpperCAmelCase_ = load_tf_weights_in_mobilebert(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , __SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
)
parser.add_argument(
"--mobilebert_config_file",
default=None,
type=str,
required=True,
help=(
"The config json file corresponding to the pre-trained MobileBERT model. \n"
"This specifies the model architecture."
),
)
parser.add_argument(
"--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
)
SCREAMING_SNAKE_CASE = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.mobilebert_config_file, args.pytorch_dump_path)
| 23 | 0 |
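# The conversion script above ends by serializing only the weights with
# `torch.save(model.state_dict(), path)`. A tiny sketch of that save/load round trip
# on a toy module, assuming PyTorch is installed:
import tempfile
from pathlib import Path

import torch
from torch import nn

model = nn.Linear(4, 2)
with tempfile.TemporaryDirectory() as tmp:
    path = Path(tmp) / "pytorch_model.bin"
    torch.save(model.state_dict(), path)      # weights only, no class is pickled

    restored = nn.Linear(4, 2)                # the architecture must be rebuilt first
    restored.load_state_dict(torch.load(path))
    assert torch.equal(model.weight, restored.weight)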
'''simple docstring'''
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> bool:
UpperCAmelCase_ = 0
    for ch in __SCREAMING_SNAKE_CASE:
        UpperCAmelCase_ = ord(ch )
        UpperCAmelCase_ = pow(2 , ch_unicode )
# If we already turned on bit for current character's unicode
if bitmap >> ch_unicode & 1 == 1:
return False
bitmap |= ch_bit_index_on
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 701 |
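# The function above checks for duplicate characters with one arbitrarily large integer
# used as a bitmap: bit k is set once code point k has been seen. A de-obfuscated
# sketch with the per-character reads restored:
def all_chars_unique(text):
    """True iff no character occurs twice, using an int as an unbounded bitmap."""
    bitmap = 0
    for ch in text:
        bit = 1 << ord(ch)     # the bit for this code point
        if bitmap & bit:       # bit already set -> duplicate character
            return False
        bitmap |= bit          # mark the code point as seen
    return True


assert all_chars_unique("abcdef")
assert not all_chars_unique("abca")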
import heapq as hq
import math
from collections.abc import Iterator
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase ):
UpperCAmelCase_ = str(id_ )
UpperCAmelCase_ = None
UpperCAmelCase_ = None
UpperCAmelCase_ = []
UpperCAmelCase_ = {} # {vertex:distance}
def __lt__( self , lowerCAmelCase ):
return self.key < other.key
def __repr__( self ):
return self.id
def A__ ( self , lowerCAmelCase ):
self.neighbors.append(lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = weight
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[int]:
# add the neighbors:
graph[a - 1].add_neighbor(graph[b - 1] )
graph[b - 1].add_neighbor(graph[a - 1] )
# add the edges:
graph[a - 1].add_edge(graph[b - 1] , __SCREAMING_SNAKE_CASE )
graph[b - 1].add_edge(graph[a - 1] , __SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> list:
UpperCAmelCase_ = []
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = graph[:]
while q:
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE )
q.remove(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in q) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
a.append((int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1) )
return a
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Iterator[tuple]:
for u in graph:
UpperCAmelCase_ = math.inf
UpperCAmelCase_ = None
UpperCAmelCase_ = 0
UpperCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
hq.heapify(__SCREAMING_SNAKE_CASE )
while h:
UpperCAmelCase_ = hq.heappop(__SCREAMING_SNAKE_CASE )
for v in u.neighbors:
if (v in h) and (u.edges[v.id] < v.key):
UpperCAmelCase_ = u
UpperCAmelCase_ = u.edges[v.id]
hq.heapify(__SCREAMING_SNAKE_CASE )
for i in range(1 , len(__SCREAMING_SNAKE_CASE ) ):
yield (int(graph[i].id ) + 1, int(graph[i].pi.id ) + 1)
def snake_case__ ( ) -> None:
pass
if __name__ == "__main__":
import doctest
doctest.testmod()
| 23 | 0 |
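# The two Prim implementations above (linear scan and heap-based) are tangled with the
# vertex class. A compact heap-based sketch over a plain adjacency dict, returning the
# edges of the minimum spanning tree:
import heapq


def prim_mst(graph, start):
    """Prim's algorithm over {node: [(neighbor, weight), ...]}; returns MST edges."""
    visited = {start}
    # Heap of candidate edges leaving the tree: (weight, tree_node, outside_node).
    heap = [(w, start, v) for v, w in graph[start]]
    heapq.heapify(heap)
    mst = []
    while heap and len(visited) < len(graph):
        w, u, v = heapq.heappop(heap)
        if v in visited:
            continue                    # stale edge back into the tree
        visited.add(v)
        mst.append((u, v, w))
        for nxt, nw in graph[v]:
            if nxt not in visited:
                heapq.heappush(heap, (nw, v, nxt))
    return mst


graph = {
    "a": [("b", 1), ("c", 4)],
    "b": [("a", 1), ("c", 2)],
    "c": [("a", 4), ("b", 2)],
}
assert sum(w for _, _, w in prim_mst(graph, "a")) == 3  # edges a-b (1) and b-c (2)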
import gc
import unittest
from diffusers import FlaxDPMSolverMultistepScheduler, FlaxStableDiffusionPipeline
from diffusers.utils import is_flax_available, slow
from diffusers.utils.testing_utils import require_flax
if is_flax_available():
import jax
import jax.numpy as jnp
from flax.jax_utils import replicate
from flax.training.common_utils import shard
@slow
@require_flax
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
"stabilityai/stable-diffusion-2" , revision="bf16" , dtype=jnp.bfloataa , )
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = jax.device_count()
UpperCAmelCase_ = num_samples * [prompt]
UpperCAmelCase_ = sd_pipe.prepare_inputs(lowerCAmelCase )
UpperCAmelCase_ = replicate(lowerCAmelCase )
UpperCAmelCase_ = shard(lowerCAmelCase )
UpperCAmelCase_ = jax.random.PRNGKey(0 )
UpperCAmelCase_ = jax.random.split(lowerCAmelCase , jax.device_count() )
UpperCAmelCase_ = sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCAmelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase_ = images[0, 253:256, 253:256, -1]
UpperCAmelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase_ = jnp.array([0.4238, 0.4414, 0.4395, 0.4453, 0.4629, 0.4590, 0.4531, 0.45508, 0.4512] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
def A__ ( self ):
UpperCAmelCase_ = "stabilityai/stable-diffusion-2"
UpperCAmelCase_ , UpperCAmelCase_ = FlaxDPMSolverMultistepScheduler.from_pretrained(lowerCAmelCase , subfolder="scheduler" )
UpperCAmelCase_ , UpperCAmelCase_ = FlaxStableDiffusionPipeline.from_pretrained(
lowerCAmelCase , scheduler=lowerCAmelCase , revision="bf16" , dtype=jnp.bfloataa , )
UpperCAmelCase_ = scheduler_params
UpperCAmelCase_ = "A painting of a squirrel eating a burger"
UpperCAmelCase_ = jax.device_count()
UpperCAmelCase_ = num_samples * [prompt]
UpperCAmelCase_ = sd_pipe.prepare_inputs(lowerCAmelCase )
UpperCAmelCase_ = replicate(lowerCAmelCase )
UpperCAmelCase_ = shard(lowerCAmelCase )
UpperCAmelCase_ = jax.random.PRNGKey(0 )
UpperCAmelCase_ = jax.random.split(lowerCAmelCase , jax.device_count() )
UpperCAmelCase_ = sd_pipe(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , num_inference_steps=25 , jit=lowerCAmelCase )[0]
assert images.shape == (jax.device_count(), 1, 768, 768, 3)
UpperCAmelCase_ = images.reshape((images.shape[0] * images.shape[1],) + images.shape[-3:] )
UpperCAmelCase_ = images[0, 253:256, 253:256, -1]
UpperCAmelCase_ = jnp.asarray(jax.device_get(image_slice.flatten() ) )
UpperCAmelCase_ = jnp.array([0.4336, 0.42969, 0.4453, 0.4199, 0.4297, 0.4531, 0.4434, 0.4434, 0.4297] )
print(f'''output_slice: {output_slice}''' )
assert jnp.abs(output_slice - expected_slice ).max() < 1e-2
| 702 |
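# The Flax pipeline test above fans one prompt out to every device: inputs are
# `shard`-ed (the leading batch axis is split into a device axis) and the PRNG key is
# `split` per device. The reshape that `shard` performs can be sketched in plain numpy:
import numpy as np

num_devices = 4                           # stands in for jax.device_count()
batch = np.arange(8 * 3).reshape(8, 3)    # a host-side batch of 8 examples

per_device = batch.shape[0] // num_devices
sharded = batch.reshape(num_devices, per_device, *batch.shape[1:])
assert sharded.shape == (4, 2, 3)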
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_fnet": ["FNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "FNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["FNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"FNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"FNetForMaskedLM",
"FNetForMultipleChoice",
"FNetForNextSentencePrediction",
"FNetForPreTraining",
"FNetForQuestionAnswering",
"FNetForSequenceClassification",
"FNetForTokenClassification",
"FNetLayer",
"FNetModel",
"FNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Tuple:
UpperCAmelCase_ = 0
while b > 0:
if b & 1:
UpperCAmelCase_ = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
| 703 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ....tokenization_utils_fast import PreTrainedTokenizerFast
from ....utils import logging
from .tokenization_retribert import RetriBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"yjernite/retribert-base-uncased": (
"https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": 512,
}
SCREAMING_SNAKE_CASE = {
"yjernite/retribert-base-uncased": {"do_lower_case": True},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[str] = VOCAB_FILES_NAMES
lowerCAmelCase_ : int = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Dict = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : List[str] = RetriBertTokenizer
lowerCAmelCase_ : Union[str, Any] = ['input_ids', 'attention_mask']
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 0 |
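# Both fast tokenizers in this section build inputs as `[CLS] A [SEP]` (optionally
# followed by `B [SEP]`) and assign segment ids 0/1. The logic reduces to a few lines
# of list arithmetic, sketched here with made-up token ids:
CLS_ID, SEP_ID = 101, 102   # illustrative ids, not tied to a specific vocabulary


def build_inputs(ids_a, ids_b=None):
    out = [CLS_ID] + ids_a + [SEP_ID]
    if ids_b:
        out += ids_b + [SEP_ID]
    return out


def token_type_ids(ids_a, ids_b=None):
    first = [0] * (len(ids_a) + 2)          # CLS + A + SEP
    if ids_b is None:
        return first
    return first + [1] * (len(ids_b) + 1)   # B + SEP


a, b = [7, 8], [9]
assert build_inputs(a, b) == [101, 7, 8, 102, 9, 102]
assert token_type_ids(a, b) == [0, 0, 0, 0, 1, 1]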
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModel,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableUnCLIPImgaImgPipeline, UNetaDConditionModel
from diffusers.pipelines.pipeline_utils import DiffusionPipeline
from diffusers.pipelines.stable_diffusion.stable_unclip_image_normalizer import StableUnCLIPImageNormalizer
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import (
enable_full_determinism,
floats_tensor,
load_image,
load_numpy,
require_torch_gpu,
skip_mps,
slow,
torch_device,
)
from ..pipeline_params import TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS, TEXT_GUIDED_IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import (
PipelineKarrasSchedulerTesterMixin,
PipelineLatentTesterMixin,
PipelineTesterMixin,
assert_mean_pixel_difference,
)
enable_full_determinism()
class lowerCamelCase ( lowercase__, lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Dict = StableUnCLIPImgaImgPipeline
lowerCAmelCase_ : List[Any] = TEXT_GUIDED_IMAGE_VARIATION_PARAMS
lowerCAmelCase_ : int = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
lowerCAmelCase_ : Any = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
lowerCAmelCase_ : Optional[int] = frozenset([] )
def A__ ( self ):
UpperCAmelCase_ = 32
UpperCAmelCase_ = embedder_hidden_size
# image encoding components
UpperCAmelCase_ = CLIPImageProcessor(crop_size=32 , size=32 )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPVisionModelWithProjection(
CLIPVisionConfig(
hidden_size=lowerCAmelCase , projection_dim=lowerCAmelCase , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , ) )
# regular denoising components
torch.manual_seed(0 )
UpperCAmelCase_ = StableUnCLIPImageNormalizer(embedding_dim=lowerCAmelCase )
UpperCAmelCase_ = DDPMScheduler(beta_schedule="squaredcos_cap_v2" )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextModel(
CLIPTextConfig(
                bos_token_id=0 , eos_token_id=2 , hidden_size=lowerCAmelCase , projection_dim=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , ) )
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock2D", "DownBlock2D") , up_block_types=("UpBlock2D", "CrossAttnUpBlock2D") , block_out_channels=(32, 64) , attention_head_dim=(2, 4) , class_embed_type="projection" , projection_class_embeddings_input_dim=embedder_projection_dim * 2 , cross_attention_dim=lowerCAmelCase , layers_per_block=1 , upcast_attention=lowerCAmelCase , use_linear_projection=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = DDIMScheduler(
beta_schedule="scaled_linear" , beta_start=0.00085 , beta_end=0.012 , prediction_type="v_prediction" , set_alpha_to_one=lowerCAmelCase , steps_offset=1 , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL()
UpperCAmelCase_ = {
# image encoding components
"feature_extractor": feature_extractor,
"image_encoder": image_encoder.eval(),
# image noising components
"image_normalizer": image_normalizer.eval(),
"image_noising_scheduler": image_noising_scheduler,
# regular denoising components
"tokenizer": tokenizer,
"text_encoder": text_encoder.eval(),
"unet": unet.eval(),
"scheduler": scheduler,
"vae": vae.eval(),
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 , lowerCAmelCase=True ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = floats_tensor((1, 3, 32, 32) , rng=random.Random(lowerCAmelCase ) ).to(lowerCAmelCase )
if pil_image:
UpperCAmelCase_ = input_image * 0.5 + 0.5
UpperCAmelCase_ = input_image.clamp(0 , 1 )
UpperCAmelCase_ = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
UpperCAmelCase_ = DiffusionPipeline.numpy_to_pil(lowerCAmelCase )[0]
return {
"prompt": "An anime racoon running a marathon",
"image": input_image,
"generator": generator,
"num_inference_steps": 2,
"output_type": "np",
}
@skip_mps
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
inputs.update({"image_embeds": None} )
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).images
UpperCAmelCase_ = image[0, -3:, -3:, -1]
assert image.shape == (1, 32, 32, 3)
UpperCAmelCase_ = np.array([0.3872, 0.7224, 0.5601, 0.4741, 0.6872, 0.5814, 0.4636, 0.3867, 0.5078] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def A__ ( self ):
UpperCAmelCase_ = torch_device in ["cpu", "mps"]
self._test_attention_slicing_forward_pass(test_max_difference=lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = torch_device in ["cpu", "mps"]
self._test_inference_batch_single_identical(test_max_difference=lowerCAmelCase )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_max_difference=lowerCAmelCase )
@slow
@require_torch_gpu
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def A__ ( self ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_l_img2img_anime_turtle_fp16.npy" )
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-l-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , "anime turle" , generator=lowerCAmelCase , output_type="np" )
UpperCAmelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/stable_unclip_2_1_h_img2img_anime_turtle_fp16.npy" )
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
# stable unclip will oom when integration tests are run on a V100,
# so turn on memory savings
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , "anime turle" , generator=lowerCAmelCase , output_type="np" )
UpperCAmelCase_ = output.images[0]
assert image.shape == (768, 768, 3)
assert_mean_pixel_difference(lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = load_image(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/stable_unclip/turtle.png" )
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
UpperCAmelCase_ = StableUnCLIPImgaImgPipeline.from_pretrained(
"fusing/stable-unclip-2-1-h-img2img" , torch_dtype=torch.floataa )
UpperCAmelCase_ = pipe.to(lowerCAmelCase )
pipe.set_progress_bar_config(disable=lowerCAmelCase )
pipe.enable_attention_slicing()
pipe.enable_sequential_cpu_offload()
UpperCAmelCase_ = pipe(
lowerCAmelCase , "anime turtle" , num_inference_steps=2 , output_type="np" , )
UpperCAmelCase_ = torch.cuda.max_memory_allocated()
# make sure that less than 7 GB is allocated
assert mem_bytes < 7 * 10**9
| 704 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
SCREAMING_SNAKE_CASE = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": 512,
"distilbert-base-uncased-distilled-squad": 512,
"distilbert-base-cased": 512,
"distilbert-base-cased-distilled-squad": 512,
"distilbert-base-german-cased": 512,
"distilbert-base-multilingual-cased": 512,
}
SCREAMING_SNAKE_CASE = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : Any = VOCAB_FILES_NAMES
lowerCAmelCase_ : List[str] = PRETRAINED_VOCAB_FILES_MAP
lowerCAmelCase_ : str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
lowerCAmelCase_ : Union[str, Any] = PRETRAINED_INIT_CONFIGURATION
lowerCAmelCase_ : int = ['input_ids', 'attention_mask']
lowerCAmelCase_ : str = DistilBertTokenizer
def __init__( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase="[UNK]" , lowerCAmelCase="[SEP]" , lowerCAmelCase="[PAD]" , lowerCAmelCase="[CLS]" , lowerCAmelCase="[MASK]" , lowerCAmelCase=True , lowerCAmelCase=None , **lowerCAmelCase , ):
super().__init__(
lowerCAmelCase , tokenizer_file=lowerCAmelCase , do_lower_case=lowerCAmelCase , unk_token=lowerCAmelCase , sep_token=lowerCAmelCase , pad_token=lowerCAmelCase , cls_token=lowerCAmelCase , mask_token=lowerCAmelCase , tokenize_chinese_chars=lowerCAmelCase , strip_accents=lowerCAmelCase , **lowerCAmelCase , )
UpperCAmelCase_ = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowerCAmelCase ) != do_lower_case
or normalizer_state.get("strip_accents" , lowerCAmelCase ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowerCAmelCase ) != tokenize_chinese_chars
):
UpperCAmelCase_ = getattr(lowerCAmelCase , normalizer_state.pop("type" ) )
UpperCAmelCase_ = do_lower_case
UpperCAmelCase_ = strip_accents
UpperCAmelCase_ = tokenize_chinese_chars
UpperCAmelCase_ = normalizer_class(**lowerCAmelCase )
UpperCAmelCase_ = do_lower_case
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = [self.sep_token_id]
UpperCAmelCase_ = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None ):
UpperCAmelCase_ = self._tokenizer.model.save(lowerCAmelCase , name=lowerCAmelCase )
return tuple(lowerCAmelCase )
| 23 | 0 |
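# The image-processor tester that follows computes the expected output of
# shortest-edge resizing (scale the shorter side to a target length, preserving the
# aspect ratio). That math in isolation:
def shortest_edge_size(height, width, shortest_edge):
    """Scale so the shorter side equals `shortest_edge`, preserving aspect ratio."""
    if width < height:
        return int(shortest_edge * height / width), shortest_edge
    if width > height:
        return shortest_edge, int(shortest_edge * width / height)
    return shortest_edge, shortest_edge


assert shortest_edge_size(400, 200, 18) == (36, 18)   # portrait: width becomes 18
assert shortest_edge_size(200, 400, 18) == (18, 36)   # landscape: height becomes 18
assert shortest_edge_size(300, 300, 18) == (18, 18)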
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=7 , lowerCAmelCase=3 , lowerCAmelCase=30 , lowerCAmelCase=400 , lowerCAmelCase=True , lowerCAmelCase=None , lowerCAmelCase=True , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=[0.5, 0.5, 0.5] , lowerCAmelCase=True , lowerCAmelCase=1 / 255 , lowerCAmelCase=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = min_resolution
UpperCAmelCase_ = max_resolution
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean
UpperCAmelCase_ = image_std
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_pad
def A__ ( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def A__ ( self , lowerCAmelCase , lowerCAmelCase=False ):
if not batched:
UpperCAmelCase_ = image_inputs[0]
if isinstance(lowerCAmelCase , Image.Image ):
UpperCAmelCase_ , UpperCAmelCase_ = image.size
else:
UpperCAmelCase_ , UpperCAmelCase_ = image.shape[1], image.shape[2]
if w < h:
UpperCAmelCase_ = int(self.size["shortest_edge"] * h / w )
UpperCAmelCase_ = self.size["shortest_edge"]
elif w > h:
UpperCAmelCase_ = self.size["shortest_edge"]
UpperCAmelCase_ = int(self.size["shortest_edge"] * w / h )
else:
UpperCAmelCase_ = self.size["shortest_edge"]
UpperCAmelCase_ = self.size["shortest_edge"]
else:
UpperCAmelCase_ = []
for image in image_inputs:
UpperCAmelCase_ , UpperCAmelCase_ = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
UpperCAmelCase_ = max(lowerCAmelCase , key=lambda lowerCAmelCase : item[0] )[0]
UpperCAmelCase_ = max(lowerCAmelCase , key=lambda lowerCAmelCase : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = ConditionalDetrImageProcessor if is_vision_available() else None
def A__ ( self ):
UpperCAmelCase_ = ConditionalDetrImageProcessingTester(self )
@property
def A__ ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def A__ ( self ):
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowerCAmelCase , "image_mean" ) )
self.assertTrue(hasattr(lowerCAmelCase , "image_std" ) )
self.assertTrue(hasattr(lowerCAmelCase , "do_normalize" ) )
self.assertTrue(hasattr(lowerCAmelCase , "do_resize" ) )
self.assertTrue(hasattr(lowerCAmelCase , "size" ) )
def A__ ( self ):
UpperCAmelCase_ = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase )
UpperCAmelCase_ = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=lowerCAmelCase )
self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
self.assertEqual(image_processor.do_pad , lowerCAmelCase )
def A__ ( self ):
pass
def A__ ( self ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , Image.Image )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , numpify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , np.ndarray )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def A__ ( self ):
# Initialize image_processing
UpperCAmelCase_ = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
UpperCAmelCase_ = prepare_image_inputs(self.image_processor_tester , equal_resolution=lowerCAmelCase , torchify=lowerCAmelCase )
for image in image_inputs:
self.assertIsInstance(lowerCAmelCase , torch.Tensor )
# Test not batched input
UpperCAmelCase_ = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
UpperCAmelCase_ = image_processing(lowerCAmelCase , return_tensors="pt" ).pixel_values
UpperCAmelCase_ , UpperCAmelCase_ = self.image_processor_tester.get_expected_values(lowerCAmelCase , batched=lowerCAmelCase )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def A__ ( self ):
# prepare image and target
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = {"image_id": 3_9769, "annotations": target}
# encode them
UpperCAmelCase_ = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
UpperCAmelCase_ = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , return_tensors="pt" )
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase , atol=1e-4 ) )
# verify area
UpperCAmelCase_ = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase ) )
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase , atol=1e-3 ) )
# verify image_id
UpperCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase ) )
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase ) )
# verify class_labels
UpperCAmelCase_ = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase ) )
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase ) )
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase ) )
@slow
def A__ ( self ):
# prepare image, target and masks_path
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
UpperCAmelCase_ = json.loads(f.read() )
UpperCAmelCase_ = {"file_name": "000000039769.png", "image_id": 3_9769, "segments_info": target}
UpperCAmelCase_ = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
UpperCAmelCase_ = ConditionalDetrImageProcessor(format="coco_panoptic" )
UpperCAmelCase_ = image_processing(images=lowerCAmelCase , annotations=lowerCAmelCase , masks_path=lowerCAmelCase , return_tensors="pt" )
# verify pixel values
UpperCAmelCase_ = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["pixel_values"].shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , lowerCAmelCase , atol=1e-4 ) )
# verify area
UpperCAmelCase_ = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , lowerCAmelCase ) )
# verify boxes
UpperCAmelCase_ = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , lowerCAmelCase , atol=1e-3 ) )
# verify image_id
UpperCAmelCase_ = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , lowerCAmelCase ) )
# verify is_crowd
UpperCAmelCase_ = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , lowerCAmelCase ) )
# verify class_labels
UpperCAmelCase_ = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , lowerCAmelCase ) )
# verify masks
UpperCAmelCase_ = 82_2873
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , lowerCAmelCase )
# verify orig_size
UpperCAmelCase_ = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , lowerCAmelCase ) )
# verify size
UpperCAmelCase_ = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , lowerCAmelCase ) )
| 705 |
import datetime
import platform
import subprocess
from typing import Optional, Tuple, Union
import numpy as np
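# Decodes in-memory audio bytes with an ffmpeg subprocess into a 1-D float32 numpy waveform at the requested sampling rate.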
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> np.array:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
UpperCAmelCase_ = "f32le"
UpperCAmelCase_ = [
"ffmpeg",
"-i",
"pipe:0",
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdin=subprocess.PIPE , stdout=subprocess.PIPE ) as ffmpeg_process:
UpperCAmelCase_ = ffmpeg_process.communicate(__SCREAMING_SNAKE_CASE )
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to load audio files from filename" ) from error
UpperCAmelCase_ = output_stream[0]
UpperCAmelCase_ = np.frombuffer(__SCREAMING_SNAKE_CASE , np.floataa )
if audio.shape[0] == 0:
raise ValueError("Malformed soundfile" )
return audio
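# Streams raw microphone audio through ffmpeg, yielding fixed-size byte chunks; the capture backend is chosen per platform below.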
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = "f32le" , ) -> Dict:
UpperCAmelCase_ = f'''{sampling_rate}'''
UpperCAmelCase_ = "1"
if format_for_conversion == "s16le":
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
UpperCAmelCase_ = platform.system()
if system == "Linux":
UpperCAmelCase_ = "alsa"
UpperCAmelCase_ = "default"
elif system == "Darwin":
UpperCAmelCase_ = "avfoundation"
UpperCAmelCase_ = ":0"
elif system == "Windows":
UpperCAmelCase_ = "dshow"
UpperCAmelCase_ = "default"
UpperCAmelCase_ = [
"ffmpeg",
"-f",
format_,
"-i",
input_,
"-ac",
ac,
"-ar",
ar,
"-f",
format_for_conversion,
"-fflags",
"nobuffer",
"-hide_banner",
"-loglevel",
"quiet",
"pipe:1",
]
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
UpperCAmelCase_ = _ffmpeg_stream(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
for item in iterator:
yield item
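# Wraps the microphone stream with overlapping strides so consumers (e.g. streaming ASR) get context on both sides of each chunk.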
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = None , __SCREAMING_SNAKE_CASE = "f32le" , ) -> int:
if stream_chunk_s is not None:
UpperCAmelCase_ = stream_chunk_s
else:
UpperCAmelCase_ = chunk_length_s
UpperCAmelCase_ = ffmpeg_microphone(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , format_for_conversion=__SCREAMING_SNAKE_CASE )
if format_for_conversion == "s16le":
UpperCAmelCase_ = np.intaa
UpperCAmelCase_ = 2
elif format_for_conversion == "f32le":
UpperCAmelCase_ = np.floataa
UpperCAmelCase_ = 4
else:
raise ValueError(f'''Unhandled format `{format_for_conversion}`. Please use `s16le` or `f32le`''' )
if stride_length_s is None:
UpperCAmelCase_ = chunk_length_s / 6
UpperCAmelCase_ = int(round(sampling_rate * chunk_length_s ) ) * size_of_sample
if isinstance(__SCREAMING_SNAKE_CASE , (int, float) ):
UpperCAmelCase_ = [stride_length_s, stride_length_s]
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[0] ) ) * size_of_sample
UpperCAmelCase_ = int(round(sampling_rate * stride_length_s[1] ) ) * size_of_sample
UpperCAmelCase_ = datetime.datetime.now()
UpperCAmelCase_ = datetime.timedelta(seconds=__SCREAMING_SNAKE_CASE )
for item in chunk_bytes_iter(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , stride=(stride_left, stride_right) , stream=__SCREAMING_SNAKE_CASE ):
# Put everything back in numpy scale
UpperCAmelCase_ = np.frombuffer(item["raw"] , dtype=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = (
item["stride"][0] // size_of_sample,
item["stride"][1] // size_of_sample,
)
UpperCAmelCase_ = sampling_rate
audio_time += delta
if datetime.datetime.now() > audio_time + 10 * delta:
# We're late !! SKIP
continue
yield item
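# Re-chunks an iterator of raw bytes into windows of chunk_len bytes, overlapping consecutive windows by (stride_left, stride_right).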
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = False ) -> Dict:
UpperCAmelCase_ = B""
UpperCAmelCase_ , UpperCAmelCase_ = stride
if stride_left + stride_right >= chunk_len:
raise ValueError(
f'''Stride needs to be strictly smaller than chunk_len: ({stride_left}, {stride_right}) vs {chunk_len}''' )
UpperCAmelCase_ = 0
for raw in iterator:
acc += raw
if stream and len(__SCREAMING_SNAKE_CASE ) < chunk_len:
UpperCAmelCase_ = (_stride_left, 0)
yield {"raw": acc[:chunk_len], "stride": stride, "partial": True}
else:
while len(__SCREAMING_SNAKE_CASE ) >= chunk_len:
# We are flushing the accumulator
UpperCAmelCase_ = (_stride_left, stride_right)
UpperCAmelCase_ = {"raw": acc[:chunk_len], "stride": stride}
if stream:
UpperCAmelCase_ = False
yield item
UpperCAmelCase_ = stride_left
UpperCAmelCase_ = acc[chunk_len - stride_left - stride_right :]
# Last chunk
if len(__SCREAMING_SNAKE_CASE ) > stride_left:
UpperCAmelCase_ = {"raw": acc, "stride": (_stride_left, 0)}
if stream:
UpperCAmelCase_ = False
yield item
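# Launches the ffmpeg command and yields its stdout in large buffered reads until the stream is exhausted.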
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
UpperCAmelCase_ = 2**24 # 16 MB read buffer
try:
with subprocess.Popen(__SCREAMING_SNAKE_CASE , stdout=subprocess.PIPE , bufsize=__SCREAMING_SNAKE_CASE ) as ffmpeg_process:
while True:
UpperCAmelCase_ = ffmpeg_process.stdout.read(__SCREAMING_SNAKE_CASE )
if raw == b"":
break
yield raw
except FileNotFoundError as error:
raise ValueError("ffmpeg was not found but is required to stream audio files from filename" ) from error
| 23 | 0 |
import unittest
from parameterized import parameterized
from transformers import AutoTokenizer, GPTNeoXConfig, is_torch_available, set_seed
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
GPTNeoXModel,
)
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=64 , lowerCAmelCase=5 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=16 , lowerCAmelCase=2 , lowerCAmelCase=0.02 , lowerCAmelCase=3 , lowerCAmelCase=4 , lowerCAmelCase=None , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = type_vocab_size
UpperCAmelCase_ = type_sequence_label_size
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = num_choices
UpperCAmelCase_ = scope
UpperCAmelCase_ = vocab_size - 1
def A__ ( self ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, input_ids, input_mask, token_labels
def A__ ( self ):
return GPTNeoXConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=lowerCAmelCase , initializer_range=self.initializer_range , pad_token_id=self.pad_token_id , )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ = True
return config, input_ids, input_mask, token_labels
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = GPTNeoXModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = True
UpperCAmelCase_ = GPTNeoXModel(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = GPTNeoXForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = GPTNeoXForQuestionAnswering(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = GPTNeoXForSequenceClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.type_sequence_label_size )
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = GPTNeoXForTokenClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = True
UpperCAmelCase_ = GPTNeoXForCausalLM(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
# first forward pass
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , use_cache=lowerCAmelCase )
UpperCAmelCase_ = outputs.past_key_values
# create hypothetical multiple next tokens and extend next_input_ids with them
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , config.vocab_size )
UpperCAmelCase_ = ids_tensor((self.batch_size, 3) , vocab_size=2 )
# append the new tokens to input_ids and the attention mask
UpperCAmelCase_ = torch.cat([input_ids, next_tokens] , dim=-1 )
UpperCAmelCase_ = torch.cat([input_mask, next_mask] , dim=-1 )
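# Run the full sequence without cache and only the new tokens with the cached past; matching slices confirm past_key_values reuse.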
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase , output_hidden_states=lowerCAmelCase )
UpperCAmelCase_ = output_from_no_past["hidden_states"][0]
UpperCAmelCase_ = model(
lowerCAmelCase , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , output_hidden_states=lowerCAmelCase , )["hidden_states"][0]
# select random slice
UpperCAmelCase_ = ids_tensor((1,) , output_from_past.shape[-1] ).item()
UpperCAmelCase_ = output_from_no_past[:, -3:, random_slice_idx].detach()
UpperCAmelCase_ = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 ) )
def A__ ( self ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = (
(
GPTNeoXModel,
GPTNeoXForCausalLM,
GPTNeoXForQuestionAnswering,
GPTNeoXForSequenceClassification,
GPTNeoXForTokenClassification,
)
if is_torch_available()
else ()
)
lowerCAmelCase_ : str = (GPTNeoXForCausalLM,) if is_torch_available() else ()
lowerCAmelCase_ : Union[str, Any] = (
{
'feature-extraction': GPTNeoXModel,
'question-answering': GPTNeoXForQuestionAnswering,
'text-classification': GPTNeoXForSequenceClassification,
'text-generation': GPTNeoXForCausalLM,
'token-classification': GPTNeoXForTokenClassification,
'zero-shot': GPTNeoXForSequenceClassification,
}
if is_torch_available()
else {}
)
lowerCAmelCase_ : Union[str, Any] = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : Any = False
def A__ ( self ):
UpperCAmelCase_ = GPTNeoXModelTester(self )
UpperCAmelCase_ = ConfigTester(self , config_class=lowerCAmelCase , hidden_size=64 , num_attention_heads=8 )
def A__ ( self ):
self.config_tester.run_common_tests()
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
# This regression test was failing with PyTorch < 1.3
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_decoder()
UpperCAmelCase_ = None
self.model_tester.create_and_check_model_as_decoder(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_causal_lm(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*lowerCAmelCase )
@unittest.skip(reason="Feed forward chunking is not implemented" )
def A__ ( self ):
pass
@parameterized.expand([("linear",), ("dynamic",)] )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = ids_tensor([1, 10] , config.vocab_size )
UpperCAmelCase_ = ids_tensor([1, int(config.max_position_embeddings * 1.5 )] , config.vocab_size )
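# The short input fits the original context window; the long one exceeds it to exercise the scaled rotary embeddings.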
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ = GPTNeoXModel(lowerCAmelCase )
original_model.to(lowerCAmelCase )
original_model.eval()
UpperCAmelCase_ = original_model(lowerCAmelCase ).last_hidden_state
UpperCAmelCase_ = original_model(lowerCAmelCase ).last_hidden_state
set_seed(42 ) # Fixed seed at init time so the two models get the same random weights
UpperCAmelCase_ = {"type": scaling_type, "factor": 10.0}
UpperCAmelCase_ = GPTNeoXModel(lowerCAmelCase )
scaled_model.to(lowerCAmelCase )
scaled_model.eval()
UpperCAmelCase_ = scaled_model(lowerCAmelCase ).last_hidden_state
UpperCAmelCase_ = scaled_model(lowerCAmelCase ).last_hidden_state
# Dynamic scaling does not change the RoPE embeddings until it receives an input longer than the original
# maximum sequence length, so the outputs for the short input should match.
if scaling_type == "dynamic":
self.assertTrue(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-5 ) )
else:
self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-5 ) )
# The output should be different for long inputs
self.assertFalse(torch.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-5 ) )
@require_torch
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase_ = AutoTokenizer.from_pretrained("EleutherAI/pythia-410m-deduped" )
for checkpointing in [True, False]:
UpperCAmelCase_ = GPTNeoXForCausalLM.from_pretrained("EleutherAI/pythia-410m-deduped" )
if checkpointing:
model.gradient_checkpointing_enable()
else:
model.gradient_checkpointing_disable()
model.to(lowerCAmelCase )
UpperCAmelCase_ = tokenizer("My favorite food is" , return_tensors="pt" ).to(lowerCAmelCase )
# The hub repo was updated on 2023-04-04, resulting in poor outputs.
# See: https://github.com/huggingface/transformers/pull/24193
UpperCAmelCase_ = "My favorite food is a good old-fashioned, old-fashioned, old-fashioned.\n\nI'm not sure"
UpperCAmelCase_ = model.generate(**lowerCAmelCase , do_sample=lowerCAmelCase , max_new_tokens=20 )
UpperCAmelCase_ = tokenizer.batch_decode(lowerCAmelCase )[0]
self.assertEqual(lowerCAmelCase , lowerCAmelCase )
| 706 |
from typing import Optional, Union
import torch
from torch import nn
from ...configuration_utils import ConfigMixin, register_to_config
from ...models.modeling_utils import ModelMixin
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 768 , ):
super().__init__()
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.ones(1 , lowerCAmelCase ) )
def A__ ( self , lowerCAmelCase = None , lowerCAmelCase = None , ):
UpperCAmelCase_ = nn.Parameter(self.mean.to(lowerCAmelCase ).to(lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(self.std.to(lowerCAmelCase ).to(lowerCAmelCase ) )
return self
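# Standardizes embeddings with the learned mean/std; the method below applies the inverse transform.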
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds - self.mean) * 1.0 / self.std
return embeds
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (embeds * self.std) + self.mean
return embeds
| 23 | 0 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_speech_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_trocr": ["TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP", "TrOCRConfig"],
"processing_trocr": ["TrOCRProcessor"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TROCR_PRETRAINED_MODEL_ARCHIVE_LIST",
"TrOCRForCausalLM",
"TrOCRPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_trocr import TROCR_PRETRAINED_CONFIG_ARCHIVE_MAP, TrOCRConfig
from .processing_trocr import TrOCRProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_trocr import TROCR_PRETRAINED_MODEL_ARCHIVE_LIST, TrOCRForCausalLM, TrOCRPreTrainedModel
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 707 |
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self , *lowerCAmelCase , **lowerCAmelCase ):
super().__init__(*lowerCAmelCase , **lowerCAmelCase )
requires_backends(self , "vision" )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == "tf" else MODEL_FOR_VISION_2_SEQ_MAPPING )
def A__ ( self , lowerCAmelCase=None , lowerCAmelCase=None , lowerCAmelCase=None ):
UpperCAmelCase_ = {}
UpperCAmelCase_ = {}
if prompt is not None:
UpperCAmelCase_ = prompt
if generate_kwargs is not None:
UpperCAmelCase_ = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
UpperCAmelCase_ = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
"'max_new_tokens' is defined twice, once in 'generate_kwargs' and once as a direct parameter,"
" please use only one" )
UpperCAmelCase_ = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self , lowerCAmelCase , **lowerCAmelCase ):
return super().__call__(lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
UpperCAmelCase_ = load_image(lowerCAmelCase )
if prompt is not None:
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
raise ValueError(
f'''Received an invalid text input, got - {type(lowerCAmelCase )} - but expected a single string. '''
"Note also that one single text can be provided for conditional image to text generation." )
UpperCAmelCase_ = self.model.config.model_type
if model_type == "git":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(text=lowerCAmelCase , add_special_tokens=lowerCAmelCase ).input_ids
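# GIT expects the CLS token prepended to the tokenized prompt.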
UpperCAmelCase_ = [self.tokenizer.cls_token_id] + input_ids
UpperCAmelCase_ = torch.tensor(lowerCAmelCase ).unsqueeze(0 )
model_inputs.update({"input_ids": input_ids} )
elif model_type == "pix2struct":
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , header_text=lowerCAmelCase , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
UpperCAmelCase_ = self.tokenizer(lowerCAmelCase , return_tensors=self.framework )
model_inputs.update(lowerCAmelCase )
else:
raise ValueError(f'''Model type {model_type} does not support conditional text generation''' )
else:
UpperCAmelCase_ = self.image_processor(images=lowerCAmelCase , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
UpperCAmelCase_ = None
return model_inputs
def A__ ( self , lowerCAmelCase , lowerCAmelCase=None ):
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs["input_ids"] , lowerCAmelCase )
and all(x is None for x in model_inputs["input_ids"] )
):
UpperCAmelCase_ = None
if generate_kwargs is None:
UpperCAmelCase_ = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
UpperCAmelCase_ = model_inputs.pop(self.model.main_input_name )
UpperCAmelCase_ = self.model.generate(lowerCAmelCase , **lowerCAmelCase , **lowerCAmelCase )
return model_outputs
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = []
for output_ids in model_outputs:
UpperCAmelCase_ = {
"generated_text": self.tokenizer.decode(
lowerCAmelCase , skip_special_tokens=lowerCAmelCase , )
}
records.append(lowerCAmelCase )
return records
| 23 | 0 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
from transformers import AutoModelForSeqaSeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def A__ ( self ):
UpperCAmelCase_ = AutoModelForSeqaSeqLM.from_pretrained("google/mt5-small" , return_dict=lowerCAmelCase ).to(lowerCAmelCase )
UpperCAmelCase_ = AutoTokenizer.from_pretrained("google/mt5-small" )
UpperCAmelCase_ = tokenizer("Hello there" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
UpperCAmelCase_ = model(input_ids.to(lowerCAmelCase ) , labels=labels.to(lowerCAmelCase ) ).loss
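# The returned loss is the mean per-token cross-entropy; scaling by sequence length recovers the summed log-likelihood.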
UpperCAmelCase_ = -(labels.shape[-1] * loss.item())
UpperCAmelCase_ = -84.9127
self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 708 |
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowerCamelCase ( lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : int = TextToVideoSDPipeline
lowerCAmelCase_ : Dict = TEXT_TO_IMAGE_PARAMS
lowerCAmelCase_ : Optional[Any] = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
lowerCAmelCase_ : Optional[Any] = frozenset(
[
'num_inference_steps',
'generator',
'latents',
'return_dict',
'callback',
'callback_steps',
] )
def A__ ( self ):
torch.manual_seed(0 )
UpperCAmelCase_ = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=("CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "CrossAttnDownBlock3D", "DownBlock3D") , up_block_types=("UpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D", "CrossAttnUpBlock3D") , cross_attention_dim=32 , attention_head_dim=4 , )
UpperCAmelCase_ = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule="scaled_linear" , clip_sample=lowerCAmelCase , set_alpha_to_one=lowerCAmelCase , )
torch.manual_seed(0 )
UpperCAmelCase_ = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
UpperCAmelCase_ = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act="gelu" , projection_dim=512 , )
UpperCAmelCase_ = CLIPTextModel(lowerCAmelCase )
UpperCAmelCase_ = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
UpperCAmelCase_ = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
}
return components
def A__ ( self , lowerCAmelCase , lowerCAmelCase=0 ):
if str(lowerCAmelCase ).startswith("mps" ):
UpperCAmelCase_ = torch.manual_seed(lowerCAmelCase )
else:
UpperCAmelCase_ = torch.Generator(device=lowerCAmelCase ).manual_seed(lowerCAmelCase )
UpperCAmelCase_ = {
"prompt": "A painting of a squirrel eating a burger",
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"output_type": "pt",
}
return inputs
def A__ ( self ):
UpperCAmelCase_ = "cpu" # ensure determinism for the device-dependent torch.Generator
UpperCAmelCase_ = self.get_dummy_components()
UpperCAmelCase_ = TextToVideoSDPipeline(**lowerCAmelCase )
UpperCAmelCase_ = sd_pipe.to(lowerCAmelCase )
sd_pipe.set_progress_bar_config(disable=lowerCAmelCase )
UpperCAmelCase_ = self.get_dummy_inputs(lowerCAmelCase )
UpperCAmelCase_ = "np"
UpperCAmelCase_ = sd_pipe(**lowerCAmelCase ).frames
UpperCAmelCase_ = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
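# Compare a 3x3 corner slice of the first generated frame against reference pixel values.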
UpperCAmelCase_ = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def A__ ( self ):
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != "cuda" or not is_xformers_available() , reason="XFormers attention is only available with CUDA and `xformers` installed" , )
def A__ ( self ):
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=lowerCAmelCase , expected_max_diff=1e-2 )
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="Batching needs to be properly figured out first for this pipeline." )
def A__ ( self ):
pass
@unittest.skip(reason="`num_images_per_prompt` argument is not supported for this pipeline." )
def A__ ( self ):
pass
def A__ ( self ):
return super().test_progress_bar()
@slow
@skip_mps
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=25 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def A__ ( self ):
UpperCAmelCase_ = load_numpy(
"https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy" )
UpperCAmelCase_ = TextToVideoSDPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b" )
UpperCAmelCase_ = pipe.to("cuda" )
UpperCAmelCase_ = "Spiderman is surfing"
UpperCAmelCase_ = torch.Generator(device="cpu" ).manual_seed(0 )
UpperCAmelCase_ = pipe(lowerCAmelCase , generator=lowerCAmelCase , num_inference_steps=2 , output_type="pt" ).frames
UpperCAmelCase_ = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_efficientformer": [
"EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP",
"EfficientFormerConfig",
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["EfficientFormerImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"EfficientFormerForImageClassification",
"EfficientFormerForImageClassificationWithTeacher",
"EfficientFormerModel",
"EfficientFormerPreTrainedModel",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFEfficientFormerForImageClassification",
"TFEfficientFormerForImageClassificationWithTeacher",
"TFEfficientFormerModel",
"TFEfficientFormerPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_efficientformer import EFFICIENTFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, EfficientFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_efficientformer import EfficientFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_efficientformer import (
EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
EfficientFormerForImageClassification,
EfficientFormerForImageClassificationWithTeacher,
EfficientFormerModel,
EfficientFormerPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_efficientformer import (
TF_EFFICIENTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEfficientFormerForImageClassification,
TFEfficientFormerForImageClassificationWithTeacher,
TFEfficientFormerModel,
TFEfficientFormerPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 709 |
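# Computes the n-th ugly number: positive integers whose only prime factors are 2, 3, and 5.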
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = [1]
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = 0, 0, 0
UpperCAmelCase_ = ugly_nums[ia] * 2
UpperCAmelCase_ = ugly_nums[ia] * 3
UpperCAmelCase_ = ugly_nums[ia] * 5
for _ in range(1 , __SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
ugly_nums.append(__SCREAMING_SNAKE_CASE )
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
UpperCAmelCase_ = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(f'''{ugly_numbers(200) = }''')
| 23 | 0 |
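# Returns the bitwise OR of two non-negative integers as a binary string (e.g. "0b101").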
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
if a < 0 or b < 0:
raise ValueError("the value of both inputs must be positive" )
UpperCAmelCase_ = str(bin(__SCREAMING_SNAKE_CASE ) )[2:] # remove the leading "0b"
UpperCAmelCase_ = str(bin(__SCREAMING_SNAKE_CASE ) )[2:]
UpperCAmelCase_ = max(len(__SCREAMING_SNAKE_CASE ) , len(__SCREAMING_SNAKE_CASE ) )
return "0b" + "".join(
str(int("1" in (char_a, char_b) ) )
for char_a, char_b in zip(a_binary.zfill(__SCREAMING_SNAKE_CASE ) , b_binary.zfill(__SCREAMING_SNAKE_CASE ) ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 710 |
import copy
import inspect
import unittest
from transformers import PretrainedConfig, SwiftFormerConfig
from transformers.testing_utils import (
require_torch,
require_vision,
slow,
torch_device,
)
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import SwiftFormerForImageClassification, SwiftFormerModel
from transformers.models.swiftformer.modeling_swiftformer import SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=13 , lowerCAmelCase=3 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=224 , lowerCAmelCase=1000 , lowerCAmelCase=[3, 3, 6, 4] , lowerCAmelCase=[48, 56, 112, 220] , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = num_channels
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = num_labels
UpperCAmelCase_ = image_size
UpperCAmelCase_ = layer_depths
UpperCAmelCase_ = embed_dims
def A__ ( self ):
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = None
if self.use_labels:
UpperCAmelCase_ = ids_tensor([self.batch_size] , self.num_labels )
UpperCAmelCase_ = self.get_config()
return config, pixel_values, labels
def A__ ( self ):
return SwiftFormerConfig(
depths=self.layer_depths , embed_dims=self.embed_dims , mlp_ratio=4 , downsamples=[True, True, True, True] , hidden_act="gelu" , num_labels=self.num_labels , down_patch_size=3 , down_stride=2 , down_pad=1 , drop_rate=0.0 , drop_path_rate=0.0 , use_layer_scale=lowerCAmelCase , layer_scale_init_value=1e-5 , )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = SwiftFormerModel(config=lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dims[-1], 7, 7) )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = self.num_labels
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = model(lowerCAmelCase , labels=lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
UpperCAmelCase_ = SwiftFormerForImageClassification(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
UpperCAmelCase_ = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
UpperCAmelCase_ = model(lowerCAmelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def A__ ( self ):
((UpperCAmelCase_) , (UpperCAmelCase_) , (UpperCAmelCase_)) = self.prepare_config_and_inputs()
UpperCAmelCase_ = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Optional[Any] = (SwiftFormerModel, SwiftFormerForImageClassification) if is_torch_available() else ()
lowerCAmelCase_ : int = (
{'feature-extraction': SwiftFormerModel, 'image-classification': SwiftFormerForImageClassification}
if is_torch_available()
else {}
)
lowerCAmelCase_ : List[Any] = False
lowerCAmelCase_ : Dict = False
lowerCAmelCase_ : int = False
lowerCAmelCase_ : str = False
lowerCAmelCase_ : Optional[Any] = False
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerModelTester(self )
UpperCAmelCase_ = ConfigTester(
self , config_class=lowerCAmelCase , has_text_modality=lowerCAmelCase , hidden_size=37 , num_attention_heads=12 , num_hidden_layers=12 , )
def A__ ( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason="SwiftFormer does not use inputs_embeds" )
def A__ ( self ):
pass
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(lowerCAmelCase , nn.Linear ) )
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(lowerCAmelCase )
UpperCAmelCase_ = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
UpperCAmelCase_ = [*signature.parameters.keys()]
UpperCAmelCase_ = ["pixel_values"]
self.assertListEqual(arg_names[:1] , lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase )
def A__ ( self ):
UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*lowerCAmelCase )
@slow
def A__ ( self ):
for model_name in SWIFTFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
UpperCAmelCase_ = SwiftFormerModel.from_pretrained(lowerCAmelCase )
self.assertIsNotNone(lowerCAmelCase )
@unittest.skip(reason="SwiftFormer does not output attentions" )
def A__ ( self ):
pass
def A__ ( self ):
def check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = model_class(lowerCAmelCase )
model.to(lowerCAmelCase )
model.eval()
with torch.no_grad():
UpperCAmelCase_ = model(**self._prepare_for_class(lowerCAmelCase , lowerCAmelCase ) )
UpperCAmelCase_ = outputs.hidden_states
UpperCAmelCase_ = 8
self.assertEqual(len(lowerCAmelCase ) , lowerCAmelCase ) # TODO
# SwiftFormer's feature maps are of shape (batch_size, embed_dims, height, width)
# with the width and height being successively divided by 2, after every 2 blocks
for i in range(len(lowerCAmelCase ) ):
self.assertEqual(
hidden_states[i].shape , torch.Size(
[
self.model_tester.batch_size,
self.model_tester.embed_dims[i // 2],
(self.model_tester.image_size // 4) // 2 ** (i // 2),
(self.model_tester.image_size // 4) // 2 ** (i // 2),
] ) , )
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
UpperCAmelCase_ = True
check_hidden_states_output(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
def _config_zero_init(lowerCAmelCase ):
UpperCAmelCase_ = copy.deepcopy(lowerCAmelCase )
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key or "initializer_factor" in key or "layer_scale" in key:
setattr(lowerCAmelCase , lowerCAmelCase , 1e-1_0 )
if isinstance(getattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) , lowerCAmelCase ):
UpperCAmelCase_ = _config_zero_init(getattr(lowerCAmelCase , lowerCAmelCase ) )
setattr(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return configs_no_init
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
UpperCAmelCase_ = _config_zero_init(lowerCAmelCase )
for model_class in self.all_model_classes:
UpperCAmelCase_ = model_class(config=lowerCAmelCase )
for name, param in model.named_parameters():
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9) / 1e9).round().item() , [0.0, 1.0] , msg=f'''Parameter {name} of model {model_class} seems not properly initialized''' , )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def A__ ( self ):
pass
def snake_case__ ( ) -> str:
UpperCAmelCase_ = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def A__ ( self ):
return ViTImageProcessor.from_pretrained("MBZUAI/swiftformer-xs" ) if is_vision_available() else None
@slow
def A__ ( self ):
UpperCAmelCase_ = SwiftFormerForImageClassification.from_pretrained("MBZUAI/swiftformer-xs" ).to(lowerCAmelCase )
UpperCAmelCase_ = self.default_image_processor
UpperCAmelCase_ = prepare_img()
UpperCAmelCase_ = image_processor(images=lowerCAmelCase , return_tensors="pt" ).to(lowerCAmelCase )
# forward pass
with torch.no_grad():
UpperCAmelCase_ = model(**lowerCAmelCase )
# verify the logits
UpperCAmelCase_ = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , lowerCAmelCase )
UpperCAmelCase_ = torch.tensor([[-2.1_7_0_3e0_0, 2.1_1_0_7e0_0, -2.0_8_1_1e0_0]] ).to(lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , lowerCAmelCase , atol=1e-4 ) )
| 23 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
SCREAMING_SNAKE_CASE = {
"configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST",
"PegasusXForConditionalGeneration",
"PegasusXModel",
"PegasusXPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 711 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available
SCREAMING_SNAKE_CASE = {"tokenization_herbert": ["HerbertTokenizer"]}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["HerbertTokenizerFast"]
if TYPE_CHECKING:
from .tokenization_herbert import HerbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_herbert_fast import HerbertTokenizerFast
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
import secrets
from random import shuffle
from string import ascii_letters, ascii_lowercase, ascii_uppercase, digits, punctuation
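# Draws every character with secrets.choice, so the generated password is cryptographically secure.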
def snake_case__ ( __SCREAMING_SNAKE_CASE = 8 ) -> str:
UpperCAmelCase_ = ascii_letters + digits + punctuation
return "".join(secrets.choice(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
# Alternative password generator: include the required characters, then fill the
# remaining length in roughly equal thirds from letters, digits, and punctuation.
# Put your code here...
i -= len(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = i // 3
UpperCAmelCase_ = i % 3
# chars = chars_incl + random_letters(ascii_letters, i / 3 + remainder) +
# random_number(digits, i / 3) + random_characters(punctuation, i / 3)
UpperCAmelCase_ = (
chars_incl
+ random(__SCREAMING_SNAKE_CASE , quotient + remainder )
+ random(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
+ random(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
)
UpperCAmelCase_ = list(__SCREAMING_SNAKE_CASE )
shuffle(__SCREAMING_SNAKE_CASE )
return "".join(__SCREAMING_SNAKE_CASE )
# random is a generalised function for letters, characters and numbers
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
return "".join(secrets.choice(__SCREAMING_SNAKE_CASE ) for _ in range(__SCREAMING_SNAKE_CASE ) )
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> str:
pass # Put your code here...
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
pass # Put your code here...
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
pass # Put your code here...
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE = 8 ) -> bool:
if len(__SCREAMING_SNAKE_CASE ) < min_length:
# The password must be at least min_length characters long
return False
UpperCAmelCase_ = any(char in ascii_uppercase for char in password )
UpperCAmelCase_ = any(char in ascii_lowercase for char in password )
UpperCAmelCase_ = any(char in digits for char in password )
UpperCAmelCase_ = any(char in punctuation for char in password )
return upper and lower and num and spec_char
# Passwords should contain UPPERCASE, lowercase,
# numbers, and special characters
def snake_case__ ( ) -> Optional[Any]:
UpperCAmelCase_ = int(input("Please indicate the max length of your password: " ).strip() )
UpperCAmelCase_ = input(
"Please indicate the characters that must be in your password: " ).strip()
print("Password generated:" , password_generator(__SCREAMING_SNAKE_CASE ) )
print(
"Alternative Password generated:" , alternative_password_generator(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) , )
print("[If you are thinking of using this passsword, You better save it.]" )
if __name__ == "__main__":
main()
| 712 |
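# Segmented sieve of Eratosthenes: sieve primes up to sqrt(n) first, then mark composites segment by segment to bound memory use.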
import math
def sieve(n: int) -> list[int]:
    """Segmented sieve: return all primes <= n, sieving in sqrt(n)-sized blocks."""
    in_prime = []
    start = 2
    end = int(math.sqrt(n))  # Size of every segment
    temp = [True] * (end + 1)
    prime = []
    # Phase 1: plain sieve on [2, sqrt(n)] to collect the base primes.
    while start <= end:
        if temp[start] is True:
            in_prime.append(start)
            for i in range(start * start, end + 1, start):
                temp[i] = False
        start += 1
    prime += in_prime
    # Phase 2: sieve each block [low, high] using only the base primes.
    low = end + 1
    high = min(2 * end, n)
    while low <= n:
        temp = [True] * (high - low + 1)
        for each in in_prime:
            # Find the first multiple of `each` inside the current block.
            t = math.floor(low / each) * each
            if t < low:
                t += each
            for j in range(t, high + 1, each):
                temp[j - low] = False
        for j in range(len(temp)):
            if temp[j] is True:
                prime.append(j + low)
        low = high + 1
        high = min(high + end, n)
    return prime
print(sieve(10**6))
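# Quick cross-check against trial division (illustrative; `_is_prime` is a
# helper added for the check, not part of the original module):
def _is_prime(k: int) -> bool:
    return k > 1 and all(k % d != 0 for d in range(2, int(math.sqrt(k)) + 1))
assert sieve(100) == [p for p in range(2, 101) if _is_prime(p)]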
| 23 | 0 |
import pytest
import requests
from datasets.utils.file_utils import http_head
from .utils import OfflineSimulationMode, RequestWouldHangIndefinitelyError, offline
@pytest.mark.integration
def test_offline_with_timeout() -> None:
    with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
        with pytest.raises(RequestWouldHangIndefinitelyError):
            requests.request("GET", "https://huggingface.co")
        with pytest.raises(requests.exceptions.ConnectTimeout):
            requests.request("GET", "https://huggingface.co", timeout=1.0)
@pytest.mark.integration
def test_offline_with_connection_error() -> None:
    with offline(OfflineSimulationMode.CONNECTION_FAILS):
        with pytest.raises(requests.exceptions.ConnectionError):
            requests.request("GET", "https://huggingface.co")
def test_offline_with_datasets_offline_mode_enabled() -> None:
    with offline(OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1):
        with pytest.raises(ConnectionError):
            http_head("https://huggingface.co")
| 713 |
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : torch.FloatTensor
class lowerCamelCase ( lowercase__, lowercase__ ):
'''simple docstring'''
@register_to_config
def __init__( self , lowerCAmelCase = 32 , lowerCAmelCase = 64 , lowerCAmelCase = 20 , lowerCAmelCase = 768 , lowerCAmelCase=77 , lowerCAmelCase=4 , lowerCAmelCase = 0.0 , lowerCAmelCase = "silu" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = "linear" , lowerCAmelCase = "prd" , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , ):
super().__init__()
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = attention_head_dim
UpperCAmelCase_ = num_attention_heads * attention_head_dim
UpperCAmelCase_ = additional_embeddings
UpperCAmelCase_ = time_embed_dim or inner_dim
UpperCAmelCase_ = embedding_proj_dim or embedding_dim
UpperCAmelCase_ = clip_embed_dim or embedding_dim
UpperCAmelCase_ = Timesteps(lowerCAmelCase , lowerCAmelCase , 0 )
UpperCAmelCase_ = TimestepEmbedding(lowerCAmelCase , lowerCAmelCase , out_dim=lowerCAmelCase , act_fn=lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if embedding_proj_norm_type is None:
UpperCAmelCase_ = None
elif embedding_proj_norm_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
if encoder_hid_proj_type is None:
UpperCAmelCase_ = None
elif encoder_hid_proj_type == "linear":
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , lowerCAmelCase ) )
if added_emb_type == "prd":
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , 1 , lowerCAmelCase ) )
elif added_emb_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
UpperCAmelCase_ = nn.ModuleList(
[
BasicTransformerBlock(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , dropout=lowerCAmelCase , activation_fn="gelu" , attention_bias=lowerCAmelCase , )
for d in range(lowerCAmelCase )
] )
if norm_in_type == "layer":
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
elif norm_in_type is None:
UpperCAmelCase_ = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
UpperCAmelCase_ = nn.LayerNorm(lowerCAmelCase )
UpperCAmelCase_ = nn.Linear(lowerCAmelCase , lowerCAmelCase )
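        # Build an additive causal mask: after triu_(1), the strictly
        # upper-triangular entries are -10000.0 (blocking attention to future
        # positions) and everything else is 0.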
UpperCAmelCase_ = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -10000.0 )
causal_attention_mask.triu_(1 )
UpperCAmelCase_ = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , lowerCAmelCase , persistent=lowerCAmelCase )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
UpperCAmelCase_ = nn.Parameter(torch.zeros(1 , lowerCAmelCase ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def A__ ( self ):
UpperCAmelCase_ = {}
def fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
UpperCAmelCase_ = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
return processors
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = len(self.attn_processors.keys() )
if isinstance(lowerCAmelCase , lowerCAmelCase ) and len(lowerCAmelCase ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(lowerCAmelCase )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , "set_processor" ):
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
module.set_processor(lowerCAmelCase )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , lowerCAmelCase , lowerCAmelCase )
for name, module in self.named_children():
fn_recursive_attn_processor(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
self.set_attn_processor(AttnProcessor() )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = True , ):
UpperCAmelCase_ = hidden_states.shape[0]
UpperCAmelCase_ = timestep
if not torch.is_tensor(lowerCAmelCase ):
UpperCAmelCase_ = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(lowerCAmelCase ) and len(timesteps.shape ) == 0:
UpperCAmelCase_ = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
UpperCAmelCase_ = timesteps * torch.ones(lowerCAmelCase , dtype=timesteps.dtype , device=timesteps.device )
UpperCAmelCase_ = self.time_proj(lowerCAmelCase )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
UpperCAmelCase_ = timesteps_projected.to(dtype=self.dtype )
UpperCAmelCase_ = self.time_embedding(lowerCAmelCase )
if self.embedding_proj_norm is not None:
UpperCAmelCase_ = self.embedding_proj_norm(lowerCAmelCase )
UpperCAmelCase_ = self.embedding_proj(lowerCAmelCase )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
UpperCAmelCase_ = self.encoder_hidden_states_proj(lowerCAmelCase )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
UpperCAmelCase_ = self.proj_in(lowerCAmelCase )
UpperCAmelCase_ = self.positional_embedding.to(hidden_states.dtype )
UpperCAmelCase_ = []
UpperCAmelCase_ = 0
if encoder_hidden_states is not None:
additional_embeds.append(lowerCAmelCase )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
UpperCAmelCase_ = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
UpperCAmelCase_ = hidden_states[:, None, :]
UpperCAmelCase_ = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
UpperCAmelCase_ = self.prd_embedding.to(hidden_states.dtype ).expand(lowerCAmelCase , -1 , -1 )
additional_embeds.append(lowerCAmelCase )
UpperCAmelCase_ = torch.cat(
lowerCAmelCase , dim=1 , )
        # Allow positional_embedding to not include the `additional_embeddings` and instead pad it with zeros for these additional tokens
UpperCAmelCase_ = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
UpperCAmelCase_ = F.pad(
lowerCAmelCase , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
UpperCAmelCase_ = hidden_states + positional_embeddings
if attention_mask is not None:
UpperCAmelCase_ = (1 - attention_mask.to(hidden_states.dtype )) * -10000.0
UpperCAmelCase_ = F.pad(lowerCAmelCase , (0, self.additional_embeddings) , value=0.0 )
UpperCAmelCase_ = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
UpperCAmelCase_ = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
UpperCAmelCase_ = self.norm_in(lowerCAmelCase )
for block in self.transformer_blocks:
UpperCAmelCase_ = block(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = self.norm_out(lowerCAmelCase )
if self.prd_embedding is not None:
UpperCAmelCase_ = hidden_states[:, -1]
else:
UpperCAmelCase_ = hidden_states[:, additional_embeddings_len:]
UpperCAmelCase_ = self.proj_to_clip_embeddings(lowerCAmelCase )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=lowerCAmelCase )
def A__ ( self , lowerCAmelCase ):
UpperCAmelCase_ = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
| 23 | 0 |
import logging
import os
import sys
from dataclasses import dataclass, field
from itertools import chain
from typing import Optional, Union
import datasets
import numpy as np
import torch
from datasets import load_dataset
import transformers
from transformers import (
AutoConfig,
AutoModelForMultipleChoice,
AutoTokenizer,
HfArgumentParser,
Trainer,
TrainingArguments,
default_data_collator,
set_seed,
)
from transformers.tokenization_utils_base import PreTrainedTokenizerBase
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import PaddingStrategy, check_min_version, send_example_telemetry
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("4.31.0")
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase__, metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase__, metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase__, metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'}, )
lowerCAmelCase_ : bool = field(
default=lowercase__, metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'}, )
lowerCAmelCase_ : str = field(
default='main', metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'}, )
lowerCAmelCase_ : bool = field(
default=lowercase__, metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
}, )
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : Optional[str] = field(default=lowercase__, metadata={'help': 'The input training data file (a text file).'} )
lowerCAmelCase_ : Optional[str] = field(
default=lowercase__, metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'}, )
lowerCAmelCase_ : bool = field(
default=lowercase__, metadata={'help': 'Overwrite the cached training and evaluation sets'} )
lowerCAmelCase_ : Optional[int] = field(
default=lowercase__, metadata={'help': 'The number of processes to use for the preprocessing.'}, )
lowerCAmelCase_ : Optional[int] = field(
default=lowercase__, metadata={
'help': (
'The maximum total input sequence length after tokenization. If passed, sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
}, )
lowerCAmelCase_ : bool = field(
default=lowercase__, metadata={
'help': (
'Whether to pad all samples to the maximum sentence length. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch. More '
'efficient on GPU but very bad for TPU.'
)
}, )
lowerCAmelCase_ : Optional[int] = field(
default=lowercase__, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of training examples to this '
'value if set.'
)
}, )
lowerCAmelCase_ : Optional[int] = field(
default=lowercase__, metadata={
'help': (
'For debugging purposes or quicker training, truncate the number of evaluation examples to this '
'value if set.'
)
}, )
def A__ ( self ):
if self.train_file is not None:
UpperCAmelCase_ = self.train_file.split("." )[-1]
assert extension in ["csv", "json"], "`train_file` should be a csv or a json file."
if self.validation_file is not None:
UpperCAmelCase_ = self.validation_file.split("." )[-1]
assert extension in ["csv", "json"], "`validation_file` should be a csv or a json file."
@dataclass
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : PreTrainedTokenizerBase
lowerCAmelCase_ : Union[bool, str, PaddingStrategy] = True
lowerCAmelCase_ : Optional[int] = None
lowerCAmelCase_ : Optional[int] = None
def __call__( self , lowerCAmelCase ):
UpperCAmelCase_ = "label" if "label" in features[0].keys() else "labels"
UpperCAmelCase_ = [feature.pop(lowerCAmelCase ) for feature in features]
UpperCAmelCase_ = len(lowerCAmelCase )
UpperCAmelCase_ = len(features[0]["input_ids"] )
UpperCAmelCase_ = [
[{k: v[i] for k, v in feature.items()} for i in range(lowerCAmelCase )] for feature in features
]
UpperCAmelCase_ = list(chain(*lowerCAmelCase ) )
UpperCAmelCase_ = self.tokenizer.pad(
lowerCAmelCase , padding=self.padding , max_length=self.max_length , pad_to_multiple_of=self.pad_to_multiple_of , return_tensors="pt" , )
# Un-flatten
UpperCAmelCase_ = {k: v.view(lowerCAmelCase , lowerCAmelCase , -1 ) for k, v in batch.items()}
# Add back labels
UpperCAmelCase_ = torch.tensor(lowerCAmelCase , dtype=torch.intaa )
return batch
def snake_case__ ( ) -> Union[str, Any]:
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
UpperCAmelCase_ = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_swag" , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
UpperCAmelCase_ = training_args.get_process_log_level()
logger.setLevel(__SCREAMING_SNAKE_CASE )
datasets.utils.logging.set_verbosity(__SCREAMING_SNAKE_CASE )
transformers.utils.logging.set_verbosity(__SCREAMING_SNAKE_CASE )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
f'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ f'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
logger.info(f'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
UpperCAmelCase_ = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
UpperCAmelCase_ = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
f'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
f'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
# In distributed training, the load_dataset function guarantee that only one local process can concurrently
# download the dataset.
if data_args.train_file is not None or data_args.validation_file is not None:
UpperCAmelCase_ = {}
if data_args.train_file is not None:
UpperCAmelCase_ = data_args.train_file
if data_args.validation_file is not None:
UpperCAmelCase_ = data_args.validation_file
UpperCAmelCase_ = data_args.train_file.split("." )[-1]
UpperCAmelCase_ = load_dataset(
__SCREAMING_SNAKE_CASE , data_files=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
else:
# Downloading and loading the swag dataset from the hub.
UpperCAmelCase_ = load_dataset(
"swag" , "regular" , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
UpperCAmelCase_ = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase_ = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , use_fast=model_args.use_fast_tokenizer , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
UpperCAmelCase_ = AutoModelForMultipleChoice.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=__SCREAMING_SNAKE_CASE , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# When using your own dataset or a different dataset from swag, you will probably need to change this.
UpperCAmelCase_ = [f'''ending{i}''' for i in range(4 )]
UpperCAmelCase_ = "sent1"
UpperCAmelCase_ = "sent2"
if data_args.max_seq_length is None:
UpperCAmelCase_ = tokenizer.model_max_length
if max_seq_length > 1024:
logger.warning(
"The chosen tokenizer supports a `model_max_length` that is longer than the default `block_size` value"
" of 1024. If you would like to use a longer `block_size` up to `tokenizer.model_max_length` you can"
" override this default with `--block_size xxx`." )
UpperCAmelCase_ = 1024
else:
if data_args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f'''The max_seq_length passed ({data_args.max_seq_length}) is larger than the maximum length for the'''
f'''model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}.''' )
UpperCAmelCase_ = min(data_args.max_seq_length , tokenizer.model_max_length )
# Preprocessing the datasets.
def preprocess_function(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ = [[context] * 4 for context in examples[context_name]]
UpperCAmelCase_ = examples[question_header_name]
UpperCAmelCase_ = [
[f'''{header} {examples[end][i]}''' for end in ending_names] for i, header in enumerate(__SCREAMING_SNAKE_CASE )
]
# Flatten out
UpperCAmelCase_ = list(chain(*__SCREAMING_SNAKE_CASE ) )
UpperCAmelCase_ = list(chain(*__SCREAMING_SNAKE_CASE ) )
# Tokenize
UpperCAmelCase_ = tokenizer(
__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , truncation=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length" if data_args.pad_to_max_length else False , )
# Un-flatten
return {k: [v[i : i + 4] for i in range(0 , len(__SCREAMING_SNAKE_CASE ) , 4 )] for k, v in tokenized_examples.items()}
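    # Shape sketch (illustrative): a batch of B examples yields 4 * B
    # (context, ending) pairs for the tokenizer; the chunking above regroups
    # every 4 consecutive rows, so each feature holds the 4 candidate
    # sequences of one example, i.e. input_ids of shape [B, 4, seq_len].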
if training_args.do_train:
if "train" not in raw_datasets:
raise ValueError("--do_train requires a train dataset" )
UpperCAmelCase_ = raw_datasets["train"]
if data_args.max_train_samples is not None:
UpperCAmelCase_ = min(len(__SCREAMING_SNAKE_CASE ) , data_args.max_train_samples )
UpperCAmelCase_ = train_dataset.select(range(__SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="train dataset map pre-processing" ):
UpperCAmelCase_ = train_dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
if training_args.do_eval:
if "validation" not in raw_datasets:
raise ValueError("--do_eval requires a validation dataset" )
UpperCAmelCase_ = raw_datasets["validation"]
if data_args.max_eval_samples is not None:
UpperCAmelCase_ = min(len(__SCREAMING_SNAKE_CASE ) , data_args.max_eval_samples )
UpperCAmelCase_ = eval_dataset.select(range(__SCREAMING_SNAKE_CASE ) )
with training_args.main_process_first(desc="validation dataset map pre-processing" ):
UpperCAmelCase_ = eval_dataset.map(
__SCREAMING_SNAKE_CASE , batched=__SCREAMING_SNAKE_CASE , num_proc=data_args.preprocessing_num_workers , load_from_cache_file=not data_args.overwrite_cache , )
# Data collator
UpperCAmelCase_ = (
default_data_collator
if data_args.pad_to_max_length
else DataCollatorForMultipleChoice(tokenizer=__SCREAMING_SNAKE_CASE , pad_to_multiple_of=8 if training_args.fpaa else None )
)
# Metric
def compute_metrics(__SCREAMING_SNAKE_CASE ):
UpperCAmelCase_ , UpperCAmelCase_ = eval_predictions
UpperCAmelCase_ = np.argmax(__SCREAMING_SNAKE_CASE , axis=1 )
return {"accuracy": (preds == label_ids).astype(np.floataa ).mean().item()}
# Initialize our Trainer
UpperCAmelCase_ = Trainer(
model=__SCREAMING_SNAKE_CASE , args=__SCREAMING_SNAKE_CASE , train_dataset=train_dataset if training_args.do_train else None , eval_dataset=eval_dataset if training_args.do_eval else None , tokenizer=__SCREAMING_SNAKE_CASE , data_collator=__SCREAMING_SNAKE_CASE , compute_metrics=__SCREAMING_SNAKE_CASE , )
# Training
if training_args.do_train:
UpperCAmelCase_ = None
if training_args.resume_from_checkpoint is not None:
UpperCAmelCase_ = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
UpperCAmelCase_ = last_checkpoint
UpperCAmelCase_ = trainer.train(resume_from_checkpoint=__SCREAMING_SNAKE_CASE )
trainer.save_model() # Saves the tokenizer too for easy upload
UpperCAmelCase_ = train_result.metrics
UpperCAmelCase_ = (
data_args.max_train_samples if data_args.max_train_samples is not None else len(__SCREAMING_SNAKE_CASE )
)
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("train" , __SCREAMING_SNAKE_CASE )
trainer.save_metrics("train" , __SCREAMING_SNAKE_CASE )
trainer.save_state()
# Evaluation
if training_args.do_eval:
logger.info("*** Evaluate ***" )
UpperCAmelCase_ = trainer.evaluate()
UpperCAmelCase_ = data_args.max_eval_samples if data_args.max_eval_samples is not None else len(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = min(__SCREAMING_SNAKE_CASE , len(__SCREAMING_SNAKE_CASE ) )
trainer.log_metrics("eval" , __SCREAMING_SNAKE_CASE )
trainer.save_metrics("eval" , __SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = {
"finetuned_from": model_args.model_name_or_path,
"tasks": "multiple-choice",
"dataset_tags": "swag",
"dataset_args": "regular",
"dataset": "SWAG",
"language": "en",
}
if training_args.push_to_hub:
trainer.push_to_hub(**__SCREAMING_SNAKE_CASE )
else:
trainer.create_model_card(**__SCREAMING_SNAKE_CASE )
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> Optional[Any]:
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 714 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {
"configuration_gpt_bigcode": ["GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP", "GPTBigCodeConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    SCREAMING_SNAKE_CASE["modeling_gpt_bigcode"] = [
"GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST",
"GPTBigCodeForSequenceClassification",
"GPTBigCodeForTokenClassification",
"GPTBigCodeForCausalLM",
"GPTBigCodeModel",
"GPTBigCodePreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], SCREAMING_SNAKE_CASE, module_spec=__spec__)
| 23 | 0 |
from pathlib import Path
import cv2 as cva  # OpenCV; the `cva` alias matches the references below
import numpy as np
from matplotlib import pyplot as plt
def get_rotation(img: np.ndarray, pts1: np.ndarray, pts2: np.ndarray, rows: int, cols: int) -> np.ndarray:
    # getAffineTransform maps the three source points pts1 onto pts2; the
    # resulting 2x3 matrix is then applied to the whole image by warpAffine.
    rotation_matrix = cva.getAffineTransform(pts1, pts2)
    return cva.warpAffine(img, rotation_matrix, (rows, cols))
if __name__ == "__main__":
# read original image
    image = cva.imread(
        str(Path(__file__).resolve().parent.parent / "image_data" / "lena.jpg")
    )
    # turn image in gray scale value
    gray_img = cva.cvtColor(image, cva.COLOR_BGR2GRAY)
    # get image shape
    img_rows, img_cols = gray_img.shape
    # set different points to rotate image
    pts1 = np.array([[50, 50], [200, 50], [50, 200]], np.float32)
    pts2 = np.array([[10, 100], [200, 50], [100, 250]], np.float32)
    pts3 = np.array([[50, 50], [150, 50], [120, 200]], np.float32)
    pts4 = np.array([[10, 100], [80, 50], [180, 250]], np.float32)
    # add all rotated images in a list (the source/target pairings below are
    # one reasonable choice; any pair of point triplets defines a valid warp)
    images = [
        gray_img,
        get_rotation(gray_img, pts1, pts2, img_rows, img_cols),
        get_rotation(gray_img, pts2, pts3, img_rows, img_cols),
        get_rotation(gray_img, pts3, pts4, img_rows, img_cols),
    ]
    # plot different image rotations
    fig = plt.figure(1)
    titles = ["Original", "Rotation 1", "Rotation 2", "Rotation 3"]
for i, image in enumerate(images):
plt.subplot(2, 2, i + 1), plt.imshow(image, "gray")
plt.title(titles[i])
plt.axis("off")
plt.subplots_adjust(left=0.0, bottom=0.05, right=1.0, top=0.95)
plt.show()
| 715 |
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
SCREAMING_SNAKE_CASE = {
"xlm-roberta-base": "https://huggingface.co/xlm-roberta-base/resolve/main/config.json",
"xlm-roberta-large": "https://huggingface.co/xlm-roberta-large/resolve/main/config.json",
"xlm-roberta-large-finetuned-conll02-dutch": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-dutch/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll02-spanish": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll02-spanish/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-english": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-english/resolve/main/config.json"
),
"xlm-roberta-large-finetuned-conll03-german": (
"https://huggingface.co/xlm-roberta-large-finetuned-conll03-german/resolve/main/config.json"
),
}
class lowerCamelCase ( PretrainedConfig ):
'''simple docstring'''
    model_type = 'xlm-roberta'
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=1 , bos_token_id=0 , eos_token_id=2 , position_embedding_type="absolute" , use_cache=True , classifier_dropout=None , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class lowerCamelCase ( OnnxConfig ):
'''simple docstring'''
@property
    def inputs( self ):
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
return OrderedDict(
[
("input_ids", dynamic_axis),
("attention_mask", dynamic_axis),
] )
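# The dynamic axes above tell ONNX export which input dimensions may vary at
# runtime: batch and sequence always, plus the choice dimension for
# multiple-choice heads.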
| 23 | 0 |
import logging
import os
from dataclasses import dataclass
from typing import List, Optional, Union
import tqdm
from filelock import FileLock
from transformers import (
BartTokenizer,
BartTokenizerFast,
DataProcessor,
PreTrainedTokenizer,
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
is_tf_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
@dataclass(frozen=lowercase__ )
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : str
lowerCAmelCase_ : str
lowerCAmelCase_ : Optional[str] = None
lowerCAmelCase_ : Optional[str] = None
lowerCAmelCase_ : Optional[str] = None
@dataclass(frozen=lowercase__ )
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : List[int]
lowerCAmelCase_ : Optional[List[int]] = None
lowerCAmelCase_ : Optional[List[int]] = None
lowerCAmelCase_ : Optional[Union[int, float]] = None
lowerCAmelCase_ : Optional[int] = None
if is_torch_available():
import torch
from torch.utils.data import Dataset
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[InputFeatures]
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase=False , lowerCAmelCase = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = os.path.join(
lowerCAmelCase , "cached_{}_{}_{}_{}".format(
"dev" if evaluate else "train" , tokenizer.__class__.__name__ , str(lowerCAmelCase ) , lowerCAmelCase , ) , )
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
UpperCAmelCase_ = cached_features_file + ".lock"
with FileLock(lowerCAmelCase ):
if os.path.exists(lowerCAmelCase ) and not overwrite_cache:
logger.info(f'''Loading features from cached file {cached_features_file}''' )
UpperCAmelCase_ = torch.load(lowerCAmelCase )
else:
logger.info(f'''Creating features from dataset file at {data_dir}''' )
UpperCAmelCase_ = (
processor.get_dev_examples(lowerCAmelCase ) if evaluate else processor.get_train_examples(lowerCAmelCase )
)
logger.info("Training examples: %s" , len(lowerCAmelCase ) )
UpperCAmelCase_ = hans_convert_examples_to_features(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
logger.info("Saving features into cached file %s" , lowerCAmelCase )
torch.save(self.features , lowerCAmelCase )
def __len__( self ):
return len(self.features )
def __getitem__( self , lowerCAmelCase ):
return self.features[i]
def A__ ( self ):
return self.label_list
if is_tf_available():
import tensorflow as tf
class lowerCamelCase :
'''simple docstring'''
lowerCAmelCase_ : List[InputFeatures]
def __init__( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = 128 , lowerCAmelCase=False , lowerCAmelCase = False , ):
UpperCAmelCase_ = hans_processors[task]()
UpperCAmelCase_ = processor.get_labels()
if tokenizer.__class__ in (
RobertaTokenizer,
RobertaTokenizerFast,
XLMRobertaTokenizer,
BartTokenizer,
BartTokenizerFast,
):
# HACK(label indices are swapped in RoBERTa pretrained model)
UpperCAmelCase_ , UpperCAmelCase_ = label_list[2], label_list[1]
UpperCAmelCase_ = label_list
UpperCAmelCase_ = processor.get_dev_examples(lowerCAmelCase ) if evaluate else processor.get_train_examples(lowerCAmelCase )
UpperCAmelCase_ = hans_convert_examples_to_features(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def gen():
for ex_index, ex in tqdm.tqdm(enumerate(self.features ) , desc="convert examples to features" ):
if ex_index % 1_0000 == 0:
logger.info("Writing example %d of %d" % (ex_index, len(lowerCAmelCase )) )
yield (
{
"example_id": 0,
"input_ids": ex.input_ids,
"attention_mask": ex.attention_mask,
"token_type_ids": ex.token_type_ids,
},
ex.label,
)
UpperCAmelCase_ = tf.data.Dataset.from_generator(
lowerCAmelCase , (
{
"example_id": tf.intaa,
"input_ids": tf.intaa,
"attention_mask": tf.intaa,
"token_type_ids": tf.intaa,
},
tf.intaa,
) , (
{
"example_id": tf.TensorShape([] ),
"input_ids": tf.TensorShape([None, None] ),
"attention_mask": tf.TensorShape([None, None] ),
"token_type_ids": tf.TensorShape([None, None] ),
},
tf.TensorShape([] ),
) , )
def A__ ( self ):
return self.dataset
def __len__( self ):
return len(self.features )
def __getitem__( self , lowerCAmelCase ):
return self.features[i]
def A__ ( self ):
return self.label_list
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def A__ ( self , lowerCAmelCase ):
return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase , "heuristics_train_set.txt" ) ) , "train" )
def A__ ( self , lowerCAmelCase ):
return self._create_examples(self._read_tsv(os.path.join(lowerCAmelCase , "heuristics_evaluation_set.txt" ) ) , "dev" )
def A__ ( self ):
return ["contradiction", "entailment", "neutral"]
def A__ ( self , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = []
for i, line in enumerate(lowerCAmelCase ):
if i == 0:
continue
UpperCAmelCase_ = "%s-%s" % (set_type, line[0])
UpperCAmelCase_ = line[5]
UpperCAmelCase_ = line[6]
UpperCAmelCase_ = line[7][2:] if line[7].startswith("ex" ) else line[7]
UpperCAmelCase_ = line[0]
examples.append(InputExample(guid=lowerCAmelCase , text_a=lowerCAmelCase , text_b=lowerCAmelCase , label=lowerCAmelCase , pairID=lowerCAmelCase ) )
return examples
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , ) -> int:
UpperCAmelCase_ = {label: i for i, label in enumerate(__SCREAMING_SNAKE_CASE )}
UpperCAmelCase_ = []
for ex_index, example in tqdm.tqdm(enumerate(__SCREAMING_SNAKE_CASE ) , desc="convert examples to features" ):
if ex_index % 1_0000 == 0:
logger.info("Writing example %d" % (ex_index) )
UpperCAmelCase_ = tokenizer(
example.text_a , example.text_b , add_special_tokens=__SCREAMING_SNAKE_CASE , max_length=__SCREAMING_SNAKE_CASE , padding="max_length" , truncation=__SCREAMING_SNAKE_CASE , return_overflowing_tokens=__SCREAMING_SNAKE_CASE , )
UpperCAmelCase_ = label_map[example.label] if example.label in label_map else 0
UpperCAmelCase_ = int(example.pairID )
features.append(InputFeatures(**__SCREAMING_SNAKE_CASE , label=__SCREAMING_SNAKE_CASE , pairID=__SCREAMING_SNAKE_CASE ) )
for i, example in enumerate(examples[:5] ):
logger.info("*** Example ***" )
logger.info(f'''guid: {example}''' )
logger.info(f'''features: {features[i]}''' )
return features
SCREAMING_SNAKE_CASE = {
"hans": 3,
}
SCREAMING_SNAKE_CASE = {
"hans": HansProcessor,
}
| 716 |
def binary_recursive(decimal: int) -> str:
    """Return the binary digits of a non-negative integer via recursion."""
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    """Convert an integer string (optionally signed) to a 0b-prefixed binary string."""
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f'''{negative}0b{binary_recursive(int(number))}'''
if __name__ == "__main__":
from doctest import testmod
testmod()
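# Example (illustrative): main("7") -> "0b111"; main("-25") -> "-0b11001".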
| 23 | 0 |
import logging
from transformers.configuration_utils import PretrainedConfig
SCREAMING_SNAKE_CASE = logging.getLogger(__name__)
class lowerCamelCase ( PretrainedConfig ):
    '''simple docstring'''
    model_type = 'masked_bert'
    def __init__( self , vocab_size=3_0522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-1_2 , pad_token_id=0 , pruning_method="topK" , mask_init="constant" , mask_scale=0.0 , **kwargs ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.pruning_method = pruning_method
        self.mask_init = mask_init
        self.mask_scale = mask_scale
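# Minimal usage sketch (illustrative; the class keeps this file's placeholder
# name `lowerCamelCase`):
# config = lowerCamelCase(pruning_method="topK", mask_scale=0.0)
# assert config.model_type == "masked_bert"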
| 717 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
if is_torch_available():
    from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    @slow
    def test_small_integration_test( self ):
        model = AutoModelForSeq2SeqLM.from_pretrained("google/mt5-small" , return_dict=True ).to(torch_device )
        tokenizer = AutoTokenizer.from_pretrained("google/mt5-small" )
        input_ids = tokenizer("Hello there" , return_tensors="pt" ).input_ids
        labels = tokenizer("Hi I am" , return_tensors="pt" ).input_ids
        loss = model(input_ids.to(torch_device ) , labels=labels.to(torch_device ) ).loss
        # loss is the mean per-token NLL, so rescale by the sequence length to
        # recover the total sequence log-likelihood.
        mtf_score = -(labels.shape[-1] * loss.item())
        EXPECTED_SCORE = -84.9127
        self.assertTrue(abs(mtf_score - EXPECTED_SCORE ) < 1e-4 )
| 23 | 0 |
import tempfile
import unittest
import numpy as np
import transformers
from transformers import GPTaTokenizer, GPTJConfig, is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax, tooslow
from ...generation.test_flax_utils import FlaxGenerationTesterMixin
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax
import jax.numpy as jnp
from transformers.modeling_flax_pytorch_utils import (
convert_pytorch_state_dict_to_flax,
load_flax_weights_in_pytorch_model,
)
from transformers.models.gptj.modeling_flax_gptj import FlaxGPTJForCausalLM, FlaxGPTJModel
if is_torch_available():
import torch
class lowerCamelCase :
'''simple docstring'''
def __init__( self , lowerCAmelCase , lowerCAmelCase=14 , lowerCAmelCase=7 , lowerCAmelCase=True , lowerCAmelCase=True , lowerCAmelCase=False , lowerCAmelCase=True , lowerCAmelCase=99 , lowerCAmelCase=32 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=4 , lowerCAmelCase=37 , lowerCAmelCase="gelu" , lowerCAmelCase=0.1 , lowerCAmelCase=0.1 , lowerCAmelCase=512 , lowerCAmelCase=0.02 , ):
UpperCAmelCase_ = parent
UpperCAmelCase_ = batch_size
UpperCAmelCase_ = seq_length
UpperCAmelCase_ = is_training
UpperCAmelCase_ = use_input_mask
UpperCAmelCase_ = use_token_type_ids
UpperCAmelCase_ = use_labels
UpperCAmelCase_ = vocab_size
UpperCAmelCase_ = hidden_size
UpperCAmelCase_ = rotary_dim
UpperCAmelCase_ = num_hidden_layers
UpperCAmelCase_ = num_attention_heads
UpperCAmelCase_ = intermediate_size
UpperCAmelCase_ = hidden_act
UpperCAmelCase_ = hidden_dropout_prob
UpperCAmelCase_ = attention_probs_dropout_prob
UpperCAmelCase_ = max_position_embeddings
UpperCAmelCase_ = initializer_range
UpperCAmelCase_ = None
UpperCAmelCase_ = vocab_size - 1
UpperCAmelCase_ = vocab_size - 1
UpperCAmelCase_ = vocab_size - 1
def A__ ( self ):
UpperCAmelCase_ = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
UpperCAmelCase_ = None
if self.use_input_mask:
UpperCAmelCase_ = random_attention_mask([self.batch_size, self.seq_length] )
UpperCAmelCase_ = GPTJConfig(
vocab_size=self.vocab_size , n_embd=self.hidden_size , n_layer=self.num_hidden_layers , n_head=self.num_attention_heads , n_positions=self.max_position_embeddings , use_cache=lowerCAmelCase , bos_token_id=self.bos_token_id , eos_token_id=self.eos_token_id , pad_token_id=self.pad_token_id , rotary_dim=self.rotary_dim , )
return (config, input_ids, input_mask)
def A__ ( self ):
UpperCAmelCase_ = self.prepare_config_and_inputs()
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = config_and_inputs
UpperCAmelCase_ = {"input_ids": input_ids, "attention_mask": attention_mask}
return config, inputs_dict
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = 20
UpperCAmelCase_ = model_class_name(lowerCAmelCase )
UpperCAmelCase_ = model.init_cache(input_ids.shape[0] , lowerCAmelCase )
UpperCAmelCase_ = jnp.ones((input_ids.shape[0], max_decoder_length) , dtype="i4" )
UpperCAmelCase_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCAmelCase_ = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , position_ids=lowerCAmelCase , )
UpperCAmelCase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ = model(
input_ids[:, -1:] , attention_mask=lowerCAmelCase , past_key_values=outputs_cache.past_key_values , position_ids=lowerCAmelCase , )
UpperCAmelCase_ = model(lowerCAmelCase )
UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ):
UpperCAmelCase_ = 20
UpperCAmelCase_ = model_class_name(lowerCAmelCase )
UpperCAmelCase_ = jnp.concatenate(
[attention_mask, jnp.zeros((attention_mask.shape[0], max_decoder_length - attention_mask.shape[1]) )] , axis=-1 , )
UpperCAmelCase_ = model.init_cache(input_ids.shape[0] , lowerCAmelCase )
UpperCAmelCase_ = jnp.broadcast_to(
jnp.arange(input_ids.shape[-1] - 1 )[None, :] , (input_ids.shape[0], input_ids.shape[-1] - 1) )
UpperCAmelCase_ = model(
input_ids[:, :-1] , attention_mask=lowerCAmelCase , past_key_values=lowerCAmelCase , position_ids=lowerCAmelCase , )
UpperCAmelCase_ = jnp.array(input_ids.shape[0] * [[input_ids.shape[-1] - 1]] , dtype="i4" )
UpperCAmelCase_ = model(
input_ids[:, -1:] , past_key_values=outputs_cache.past_key_values , attention_mask=lowerCAmelCase , position_ids=lowerCAmelCase , )
UpperCAmelCase_ = model(lowerCAmelCase , attention_mask=lowerCAmelCase )
UpperCAmelCase_ = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f'''Max diff is {diff}''' )
@require_flax
class lowerCamelCase ( lowercase__, lowercase__, unittest.TestCase ):
'''simple docstring'''
lowerCAmelCase_ : Union[str, Any] = (FlaxGPTJModel, FlaxGPTJForCausalLM) if is_flax_available() else ()
lowerCAmelCase_ : Dict = (FlaxGPTJForCausalLM,) if is_flax_available() else ()
def A__ ( self ):
UpperCAmelCase_ = FlaxGPTJModelTester(self )
def A__ ( self ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
def A__ ( self ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ , UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs()
self.model_tester.check_use_cache_forward_with_attn_mask(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
@tooslow
def A__ ( self ):
UpperCAmelCase_ = GPTaTokenizer.from_pretrained("gpt2" , pad_token="<|endoftext|>" , padding_side="left" )
UpperCAmelCase_ = tokenizer(["Hello this is a long string", "Hey"] , return_tensors="np" , padding=lowerCAmelCase , truncation=lowerCAmelCase )
UpperCAmelCase_ = FlaxGPTJForCausalLM.from_pretrained("EleutherAI/gpt-j-6B" )
UpperCAmelCase_ = False
UpperCAmelCase_ = model.config.eos_token_id
UpperCAmelCase_ = jax.jit(model.generate )
UpperCAmelCase_ = jit_generate(
inputs["input_ids"] , attention_mask=inputs["attention_mask"] , pad_token_id=tokenizer.pad_token_id ).sequences
UpperCAmelCase_ = tokenizer.batch_decode(lowerCAmelCase , skip_special_tokens=lowerCAmelCase )
UpperCAmelCase_ = [
"Hello this is a long string of text.\n\nI'm trying to get the text of the",
"Hey, I'm a little late to the party. I'm going to",
]
self.assertListEqual(lowerCAmelCase , lowerCAmelCase )
@is_pt_flax_cross_test
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase_ = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ , UpperCAmelCase_ = pt_inputs["input_ids"].shape
UpperCAmelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCAmelCase ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
UpperCAmelCase_ = pt_model_class(lowerCAmelCase ).eval()
UpperCAmelCase_ = model_class(lowerCAmelCase , dtype=jnp.floataa )
UpperCAmelCase_ = convert_pytorch_state_dict_to_flax(pt_model.state_dict() , lowerCAmelCase )
UpperCAmelCase_ = fx_state
with torch.no_grad():
UpperCAmelCase_ = pt_model(**lowerCAmelCase ).to_tuple()
UpperCAmelCase_ = fx_model(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
pt_model.save_pretrained(lowerCAmelCase )
UpperCAmelCase_ = model_class.from_pretrained(lowerCAmelCase , from_pt=lowerCAmelCase )
UpperCAmelCase_ = fx_model_loaded(**lowerCAmelCase ).to_tuple()
self.assertEqual(
len(lowerCAmelCase ) , len(lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output_loaded, pt_output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assert_almost_equals(fx_output_loaded[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@is_pt_flax_cross_test
def A__ ( self ):
UpperCAmelCase_ , UpperCAmelCase_ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
# prepare inputs
UpperCAmelCase_ = self._prepare_for_class(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = {k: torch.tensor(v.tolist() ) for k, v in prepared_inputs_dict.items()}
# load corresponding PyTorch class
UpperCAmelCase_ = model_class.__name__[4:] # Skip the "Flax" at the beginning
UpperCAmelCase_ = getattr(lowerCAmelCase , lowerCAmelCase )
UpperCAmelCase_ = pt_model_class(lowerCAmelCase ).eval()
UpperCAmelCase_ = model_class(lowerCAmelCase , dtype=jnp.floataa )
UpperCAmelCase_ = load_flax_weights_in_pytorch_model(lowerCAmelCase , fx_model.params )
UpperCAmelCase_ , UpperCAmelCase_ = pt_inputs["input_ids"].shape
UpperCAmelCase_ = np.random.randint(0 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCAmelCase ):
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
UpperCAmelCase_ = 0
UpperCAmelCase_ = 1
# make sure weights are tied in PyTorch
pt_model.tie_weights()
with torch.no_grad():
UpperCAmelCase_ = pt_model(**lowerCAmelCase ).to_tuple()
UpperCAmelCase_ = fx_model(**lowerCAmelCase ).to_tuple()
self.assertEqual(len(lowerCAmelCase ) , len(lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
with tempfile.TemporaryDirectory() as tmpdirname:
fx_model.save_pretrained(lowerCAmelCase )
UpperCAmelCase_ = pt_model_class.from_pretrained(lowerCAmelCase , from_flax=lowerCAmelCase )
with torch.no_grad():
UpperCAmelCase_ = pt_model_loaded(**lowerCAmelCase ).to_tuple()
self.assertEqual(
len(lowerCAmelCase ) , len(lowerCAmelCase ) , "Output lengths differ between Flax and PyTorch" )
for fx_output, pt_output in zip(lowerCAmelCase , lowerCAmelCase ):
self.assert_almost_equals(fx_output[:, -1] , pt_output[:, -1].numpy() , 4e-2 )
@tooslow
def A__ ( self ):
for model_class_name in self.all_model_classes:
UpperCAmelCase_ = model_class_name.from_pretrained("EleutherAI/gpt-j-6B" )
UpperCAmelCase_ = model(np.ones((1, 1) ) )
self.assertIsNotNone(lowerCAmelCase )
| 718 |
def multiply(a: int, b: int) -> int:
    """Multiply a and b by repeated doubling and adding (Russian peasant method)."""
    res = 0
    while b > 0:
        if b & 1:  # add the current power-of-two multiple when the bit is set
            res += a
        a += a
        b >>= 1
    return res
def multiply_mod(a: int, b: int, c: int) -> int:
    """Multiply a and b modulo c, keeping the accumulator reduced mod c."""
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % c) + (a % c)) % c
        a += a
        b >>= 1
    return res
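# Worked example (illustrative): 34 = 0b100010, so multiply(12, 34) adds the
# doubled values 24 (bit 1) and 384 (bit 5): 24 + 384 = 408 = 12 * 34.
assert multiply(12, 34) == 408
assert multiply_mod(12, 34, 5) == 408 % 5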
| 23 | 0 |
'''simple docstring'''
# A Bipartite Graph is a graph whose vertices can be divided into two independent sets,
# U and V such that every edge (u, v) either connects a vertex from U to V or a vertex
# from V to U. In other words, for every edge (u, v), either u belongs to U and v to V,
# or u belongs to V and v to U. We can also say that there is no edge that connects
# vertices of same set.
def check_bipartite_dfs(graph) -> bool:
    visited = [False] * len(graph)
    color = [-1] * len(graph)
    def dfs(v, c):
        # 2-color the component reachable from v, alternating colors.
        visited[v] = True
        color[v] = c
        for u in graph[v]:
            if not visited[u]:
                dfs(u, 1 - c)
    for i in range(len(graph)):
        if not visited[i]:
            dfs(i, 0)
    for i in range(len(graph)):
        for j in graph[i]:
            if color[i] == color[j]:
                return False
    return True
# Adjacency list of graph
graph = {0: [1, 3], 1: [0, 2], 2: [1, 3], 3: [0, 2], 4: []}
print(check_bipartite_dfs(graph))
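# A triangle (odd cycle) is the canonical non-bipartite graph (illustrative):
graph = {0: [1, 2], 1: [0, 2], 2: [0, 1]}
print(check_bipartite_dfs(graph))  # expected output: False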
| 719 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
SCREAMING_SNAKE_CASE = logging.get_logger(__name__)
def snake_case__ ( videos ) -> List[List[ImageInput]]:
if isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__SCREAMING_SNAKE_CASE , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__SCREAMING_SNAKE_CASE ):
return [[videos]]
raise ValueError(f'''Could not make batched video from {videos}''' )
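# Accepted layouts (illustrative): a single image becomes [[image]]; a flat
# list of frames (one video) becomes [frames]; a list of videos is returned
# unchanged.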
class lowerCamelCase ( lowercase__ ):
'''simple docstring'''
lowerCAmelCase_ : List[Any] = ['pixel_values']
def __init__( self , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = True , lowerCAmelCase = 1 / 255 , lowerCAmelCase = True , lowerCAmelCase = None , lowerCAmelCase = None , **lowerCAmelCase , ):
super().__init__(**lowerCAmelCase )
UpperCAmelCase_ = size if size is not None else {"shortest_edge": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else {"height": 224, "width": 224}
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
UpperCAmelCase_ = do_resize
UpperCAmelCase_ = size
UpperCAmelCase_ = do_center_crop
UpperCAmelCase_ = crop_size
UpperCAmelCase_ = resample
UpperCAmelCase_ = do_rescale
UpperCAmelCase_ = rescale_factor
UpperCAmelCase_ = do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
UpperCAmelCase_ = image_std if image_std is not None else IMAGENET_STANDARD_STD
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = PILImageResampling.BILINEAR , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
if "shortest_edge" in size:
UpperCAmelCase_ = get_resize_output_image_size(lowerCAmelCase , size["shortest_edge"] , default_to_square=lowerCAmelCase )
elif "height" in size and "width" in size:
UpperCAmelCase_ = (size["height"], size["width"])
else:
raise ValueError(f'''Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}''' )
return resize(lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
UpperCAmelCase_ = get_size_dict(lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(f'''Size must have \'height\' and \'width\' as keys. Got {size.keys()}''' )
return center_crop(lowerCAmelCase , size=(size["height"], size["width"]) , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return rescale(lowerCAmelCase , scale=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase = None , **lowerCAmelCase , ):
return normalize(lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase , data_format=lowerCAmelCase , **lowerCAmelCase )
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , ):
        if do_resize and (size is None or resample is None):
raise ValueError("Size and resample must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
UpperCAmelCase_ = to_numpy_array(lowerCAmelCase )
if do_resize:
UpperCAmelCase_ = self.resize(image=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase )
if do_center_crop:
UpperCAmelCase_ = self.center_crop(lowerCAmelCase , size=lowerCAmelCase )
if do_rescale:
UpperCAmelCase_ = self.rescale(image=lowerCAmelCase , scale=lowerCAmelCase )
if do_normalize:
UpperCAmelCase_ = self.normalize(image=lowerCAmelCase , mean=lowerCAmelCase , std=lowerCAmelCase )
UpperCAmelCase_ = to_channel_dimension_format(lowerCAmelCase , lowerCAmelCase )
return image
def A__ ( self , lowerCAmelCase , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = None , lowerCAmelCase = ChannelDimension.FIRST , **lowerCAmelCase , ):
UpperCAmelCase_ = do_resize if do_resize is not None else self.do_resize
UpperCAmelCase_ = resample if resample is not None else self.resample
UpperCAmelCase_ = do_center_crop if do_center_crop is not None else self.do_center_crop
UpperCAmelCase_ = do_rescale if do_rescale is not None else self.do_rescale
UpperCAmelCase_ = rescale_factor if rescale_factor is not None else self.rescale_factor
UpperCAmelCase_ = do_normalize if do_normalize is not None else self.do_normalize
UpperCAmelCase_ = image_mean if image_mean is not None else self.image_mean
UpperCAmelCase_ = image_std if image_std is not None else self.image_std
UpperCAmelCase_ = size if size is not None else self.size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , default_to_square=lowerCAmelCase )
UpperCAmelCase_ = crop_size if crop_size is not None else self.crop_size
UpperCAmelCase_ = get_size_dict(lowerCAmelCase , param_name="crop_size" )
if not valid_images(lowerCAmelCase ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
UpperCAmelCase_ = make_batched(lowerCAmelCase )
UpperCAmelCase_ = [
[
self._preprocess_image(
image=lowerCAmelCase , do_resize=lowerCAmelCase , size=lowerCAmelCase , resample=lowerCAmelCase , do_center_crop=lowerCAmelCase , crop_size=lowerCAmelCase , do_rescale=lowerCAmelCase , rescale_factor=lowerCAmelCase , do_normalize=lowerCAmelCase , image_mean=lowerCAmelCase , image_std=lowerCAmelCase , data_format=lowerCAmelCase , )
for img in video
]
for video in videos
]
UpperCAmelCase_ = {"pixel_values": videos}
return BatchFeature(data=lowerCAmelCase , tensor_type=lowerCAmelCase )
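# Hypothetical usage sketch (the exported class name below is assumed; it is not
# visible in this file). Preprocessing a two-frame video of random pixels would
# yield a channel-first batch of shape (videos, frames, channels, height, width):
#     import numpy as np
#     processor = VideoImageProcessor(size={"shortest_edge": 224})  # name assumed
#     frames = [np.random.randint(0, 256, (360, 640, 3), dtype=np.uint8) for _ in range(2)]
#     batch = processor(frames, return_tensors="np")
#     batch["pixel_values"].shape  # (1, 2, 3, 224, 224)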
| 23 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ) -> Union[str, Any]:
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"
UpperCAmelCase_ = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert("RGB" )
UpperCAmelCase_ = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
] )
UpperCAmelCase_ = transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE )
return image
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
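    # translate original BLIP state-dict key names to the HF module layout,
    # applying one regex substitution per naming difference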
if "visual_encoder" in key:
UpperCAmelCase_ = re.sub("visual_encoder*" , "vision_model.encoder" , __SCREAMING_SNAKE_CASE )
if "blocks" in key:
UpperCAmelCase_ = re.sub(R"blocks" , "layers" , __SCREAMING_SNAKE_CASE )
if "attn" in key:
UpperCAmelCase_ = re.sub(R"attn" , "self_attn" , __SCREAMING_SNAKE_CASE )
if "norm1" in key:
UpperCAmelCase_ = re.sub(R"norm1" , "layer_norm1" , __SCREAMING_SNAKE_CASE )
if "norm2" in key:
UpperCAmelCase_ = re.sub(R"norm2" , "layer_norm2" , __SCREAMING_SNAKE_CASE )
if "encoder.norm" in key:
UpperCAmelCase_ = re.sub(R"encoder.norm" , "post_layernorm" , __SCREAMING_SNAKE_CASE )
if "encoder.patch_embed.proj" in key:
UpperCAmelCase_ = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , __SCREAMING_SNAKE_CASE )
if "encoder.pos_embed" in key:
UpperCAmelCase_ = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , __SCREAMING_SNAKE_CASE )
if "encoder.cls_token" in key:
UpperCAmelCase_ = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , __SCREAMING_SNAKE_CASE )
if "self_attn" in key:
UpperCAmelCase_ = re.sub(R"self_attn.proj" , "self_attn.projection" , __SCREAMING_SNAKE_CASE )
return key
@torch.no_grad()
def snake_case__ ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE=None ) -> Optional[Any]:
if config_path is not None:
UpperCAmelCase_ = BlipConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
UpperCAmelCase_ = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
UpperCAmelCase_ = BlipForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval()
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"
UpperCAmelCase_ = blip_decoder(pretrained=__SCREAMING_SNAKE_CASE , image_size=384 , vit="base" )
UpperCAmelCase_ = pt_model.eval()
UpperCAmelCase_ = pt_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = rename_key(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = value
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = 384
UpperCAmelCase_ = load_demo_image(image_size=__SCREAMING_SNAKE_CASE , device="cpu" )
UpperCAmelCase_ = BertTokenizer.from_pretrained("bert-base-uncased" )
UpperCAmelCase_ = tokenizer(["a picture of"] ).input_ids
UpperCAmelCase_ = hf_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_0522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
UpperCAmelCase_ = hf_model.generate(__SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_0522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
UpperCAmelCase_ = (
"https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"
)
UpperCAmelCase_ = blip_vqa(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit="base" )
vqa_model.eval()
UpperCAmelCase_ = vqa_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = rename_key(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForQuestionAnswering(__SCREAMING_SNAKE_CASE )
hf_vqa_model.load_state_dict(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = ["How many dogs are in this image?"]
UpperCAmelCase_ = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="pt" ).input_ids
UpperCAmelCase_ = hf_vqa_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
UpperCAmelCase_ = "https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"
UpperCAmelCase_ = blip_itm(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit="base" )
itm_model.eval()
UpperCAmelCase_ = itm_model.state_dict()
for key in modified_state_dict.copy():
UpperCAmelCase_ = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = rename_key(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = value
UpperCAmelCase_ = BlipForImageTextRetrieval(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = ["A picture of a woman with a dog sitting in a beach"]
UpperCAmelCase_ = tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors="pt" , padding="max_length" , truncation=__SCREAMING_SNAKE_CASE , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(__SCREAMING_SNAKE_CASE )
hf_itm_model.eval()
UpperCAmelCase_ = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
SCREAMING_SNAKE_CASE = argparse.ArgumentParser()
parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
SCREAMING_SNAKE_CASE = parser.parse_args()
    convert_blip_checkpoint(args.pytorch_dump_folder_path, args.config_path)
| 720 |
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 1
for i in range(1 , num + 1 ):
fact *= i
return fact
def snake_case__ ( __SCREAMING_SNAKE_CASE ) -> int:
UpperCAmelCase_ = 0
while number > 0:
UpperCAmelCase_ = number % 10
sum_of_digits += last_digit
UpperCAmelCase_ = number // 10 # Removing the last_digit from the given number
return sum_of_digits
def snake_case__ ( __SCREAMING_SNAKE_CASE = 100 ) -> int:
UpperCAmelCase_ = factorial(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = split_and_add(__SCREAMING_SNAKE_CASE )
return result
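# Worked example: factorial(10) = 3628800 and 3 + 6 + 2 + 8 + 8 + 0 + 0 = 27,
# so solution(10) == 27; for the default num = 100 the digit sum is 648.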
if __name__ == "__main__":
print(solution(int(input("Enter the Number: ").strip())))
| 23 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def snake_case__ ( ) -> int:
UpperCAmelCase_ = HfArgumentParser(__SCREAMING_SNAKE_CASE )
UpperCAmelCase_ = parser.parse_args_into_dataclasses()[0]
UpperCAmelCase_ = TensorFlowBenchmark(args=__SCREAMING_SNAKE_CASE )
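    # re-parse inside try/except so deprecated --no_<flag> spellings are reported
    # with the new --no-<flag> syntax instead of failing with a raw parser error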
try:
UpperCAmelCase_ = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
UpperCAmelCase_ = "Arg --no_{0} is no longer used, please use --no-{0} instead."
UpperCAmelCase_ = " ".join(str(__SCREAMING_SNAKE_CASE ).split(" " )[:-1] )
UpperCAmelCase_ = ""
UpperCAmelCase_ = eval(str(__SCREAMING_SNAKE_CASE ).split(" " )[-1] )
UpperCAmelCase_ = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(__SCREAMING_SNAKE_CASE )
if len(__SCREAMING_SNAKE_CASE ) > 0:
UpperCAmelCase_ = full_error_msg + begin_error_msg + str(__SCREAMING_SNAKE_CASE )
raise ValueError(__SCREAMING_SNAKE_CASE )
benchmark.run()
if __name__ == "__main__":
main()
| 721 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
SCREAMING_SNAKE_CASE = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"XLNetForMultipleChoice",
"XLNetForQuestionAnswering",
"XLNetForQuestionAnsweringSimple",
"XLNetForSequenceClassification",
"XLNetForTokenClassification",
"XLNetLMHeadModel",
"XLNetModel",
"XLNetPreTrainedModel",
"load_tf_weights_in_xlnet",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
SCREAMING_SNAKE_CASE = [
"TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST",
"TFXLNetForMultipleChoice",
"TFXLNetForQuestionAnsweringSimple",
"TFXLNetForSequenceClassification",
"TFXLNetForTokenClassification",
"TFXLNetLMHeadModel",
"TFXLNetMainLayer",
"TFXLNetModel",
"TFXLNetPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
SCREAMING_SNAKE_CASE = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 23 | 0 |
"""simple docstring"""
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case = "cpu" , snake_case = "openai/clip-vit-large-patch14" ) -> None:
_UpperCAmelCase = device
_UpperCAmelCase = CLIPTokenizerFast.from_pretrained(snake_case )
_UpperCAmelCase = [0.48145466, 0.4578275, 0.40821073]
_UpperCAmelCase = [0.26862954, 0.26130258, 0.27577711]
_UpperCAmelCase = torchvision.transforms.Normalize(self.image_mean , self.image_std )
_UpperCAmelCase = torchvision.transforms.Resize(224 )
_UpperCAmelCase = torchvision.transforms.CenterCrop(224 )
def lowerCamelCase_ ( self , snake_case ) -> Optional[Any]:
_UpperCAmelCase = self.resize(snake_case )
_UpperCAmelCase = self.center_crop(snake_case )
_UpperCAmelCase = self.normalize(snake_case )
return images
def __call__( self , snake_case=None , snake_case=None , **snake_case ) -> List[Any]:
_UpperCAmelCase = self.tokenizer(text=snake_case , **snake_case )
_UpperCAmelCase = self.preprocess_img(snake_case )
_UpperCAmelCase = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case=10 , snake_case=0.01 , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=None , snake_case=False , snake_case=True , snake_case="image" , snake_case=True , snake_case=False , snake_case=False , snake_case=False , ) -> None:
super().__init__()
_UpperCAmelCase = None
_UpperCAmelCase = device if device else get_device()
if vqgan:
_UpperCAmelCase = vqgan
else:
_UpperCAmelCase = load_vqgan(self.device , conf_path=snake_case , ckpt_path=snake_case )
self.vqgan.eval()
if clip:
_UpperCAmelCase = clip
else:
_UpperCAmelCase = CLIPModel.from_pretrained('openai/clip-vit-base-patch32' )
self.clip.to(self.device )
_UpperCAmelCase = ProcessorGradientFlow(device=self.device )
_UpperCAmelCase = iterations
_UpperCAmelCase = lr
_UpperCAmelCase = log
_UpperCAmelCase = make_grid
_UpperCAmelCase = return_val
_UpperCAmelCase = quantize
_UpperCAmelCase = self.vqgan.decoder.z_shape
def lowerCamelCase_ ( self , snake_case=None , snake_case=None , snake_case=5 , snake_case=True ) -> int:
_UpperCAmelCase = []
if output_path is None:
_UpperCAmelCase = './animation.gif'
if input_path is None:
_UpperCAmelCase = self.save_path
_UpperCAmelCase = sorted(glob(input_path + '/*' ) )
if not len(snake_case ):
raise ValueError(
'No images found in save path, aborting (did you pass save_intermediate=True to the generate'
' function?)' )
if len(snake_case ) == 1:
print('Only one image found in save path, (did you pass save_intermediate=True to the generate function?)' )
_UpperCAmelCase = total_duration / len(snake_case )
_UpperCAmelCase = [frame_duration] * len(snake_case )
if extend_frames:
_UpperCAmelCase = 1.5
_UpperCAmelCase = 3
for file_name in paths:
if file_name.endswith('.png' ):
images.append(imageio.imread(snake_case ) )
imageio.mimsave(snake_case , snake_case , duration=snake_case )
print(f'gif saved to {output_path}' )
def lowerCamelCase_ ( self , snake_case=None , snake_case=None ) -> Optional[Any]:
if not (path or img):
raise ValueError('Input either path or tensor' )
if img is not None:
raise NotImplementedError
_UpperCAmelCase = preprocess(Image.open(snake_case ) , target_image_size=256 ).to(self.device )
_UpperCAmelCase = preprocess_vqgan(snake_case )
_UpperCAmelCase , *_UpperCAmelCase = self.vqgan.encode(snake_case )
return z
def lowerCamelCase_ ( self , snake_case ) -> Any:
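        # decode the current latent plus the learned edit vector back to image
        # space, optionally snapping the shifted latent through the VQGAN codebook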
_UpperCAmelCase = self.latent.detach().requires_grad_()
_UpperCAmelCase = base_latent + transform_vector
if self.quantize:
_UpperCAmelCase , *_UpperCAmelCase = self.vqgan.quantize(snake_case )
else:
_UpperCAmelCase = trans_latent
return self.vqgan.decode(snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None ) -> Optional[Any]:
_UpperCAmelCase = self.clip_preprocessor(text=snake_case , images=snake_case , return_tensors='pt' , padding=snake_case )
_UpperCAmelCase = self.clip(**snake_case )
_UpperCAmelCase = clip_outputs.logits_per_image
if weights is not None:
_UpperCAmelCase = similarity_logits * weights
return similarity_logits.sum()
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Union[str, Any]:
_UpperCAmelCase = self._get_clip_similarity(pos_prompts['prompts'] , snake_case , weights=(1 / pos_prompts['weights']) )
if neg_prompts:
_UpperCAmelCase = self._get_clip_similarity(neg_prompts['prompts'] , snake_case , weights=neg_prompts['weights'] )
else:
_UpperCAmelCase = torch.tensor([1] , device=self.device )
_UpperCAmelCase = -torch.log(snake_case ) + torch.log(snake_case )
return loss
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = torch.randn_like(self.latent , requires_grad=snake_case , device=self.device )
_UpperCAmelCase = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
_UpperCAmelCase = self._add_vector(snake_case )
_UpperCAmelCase = loop_post_process(snake_case )
_UpperCAmelCase = self._get_CLIP_loss(snake_case , snake_case , snake_case )
print('CLIP loss' , snake_case )
if self.log:
wandb.log({'CLIP Loss': clip_loss} )
clip_loss.backward(retain_graph=snake_case )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Any:
wandb.init(reinit=snake_case , project='face-editor' )
wandb.config.update({'Positive Prompts': positive_prompts} )
wandb.config.update({'Negative Prompts': negative_prompts} )
wandb.config.update({'lr': self.lr, 'iterations': self.iterations} )
if image_path:
_UpperCAmelCase = Image.open(snake_case )
_UpperCAmelCase = image.resize((256, 256) )
            wandb.log({'Original Image': wandb.Image(snake_case )} )
def lowerCamelCase_ ( self , snake_case ) -> Optional[int]:
if not prompts:
return []
_UpperCAmelCase = []
_UpperCAmelCase = []
if isinstance(snake_case , snake_case ):
_UpperCAmelCase = [prompt.strip() for prompt in prompts.split('|' )]
for prompt in prompts:
if isinstance(snake_case , (tuple, list) ):
_UpperCAmelCase = prompt[0]
_UpperCAmelCase = float(prompt[1] )
elif ":" in prompt:
_UpperCAmelCase , _UpperCAmelCase = prompt.split(':' )
_UpperCAmelCase = float(snake_case )
else:
_UpperCAmelCase = prompt
_UpperCAmelCase = 1.0
processed_prompts.append(snake_case )
weights.append(snake_case )
return {
"prompts": processed_prompts,
"weights": torch.tensor(snake_case , device=self.device ),
}
def lowerCamelCase_ ( self , snake_case , snake_case=None , snake_case=None , snake_case=True , snake_case=False , snake_case=True , snake_case=True , snake_case=None , ) -> Optional[Any]:
if image_path:
_UpperCAmelCase = self._get_latent(snake_case )
else:
_UpperCAmelCase = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(snake_case , snake_case , snake_case )
assert pos_prompts, "You must provide at least one positive prompt."
_UpperCAmelCase = self.process_prompts(snake_case )
_UpperCAmelCase = self.process_prompts(snake_case )
if save_final and save_path is None:
_UpperCAmelCase = os.path.join('./outputs/' , '_'.join(pos_prompts['prompts'] ) )
if not os.path.exists(snake_case ):
os.makedirs(snake_case )
else:
_UpperCAmelCase = save_path + '_' + get_timestamp()
os.makedirs(snake_case )
_UpperCAmelCase = save_path
_UpperCAmelCase = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print('Original Image' )
show_pil(custom_to_pil(snake_case ) )
_UpperCAmelCase = loop_post_process(snake_case )
for iter, transformed_img in enumerate(self._optimize_CLIP(snake_case , snake_case , snake_case ) ):
if show_intermediate:
show_pil(snake_case )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}.png' ) )
if self.log:
wandb.log({'Image': wandb.Image(snake_case )} )
if show_final:
show_pil(snake_case )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'iter_{iter:03d}_final.png' ) )
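# Hypothetical usage sketch (class and argument names are assumed from context,
# not taken from this file):
#     editor = VQGANCLIPEditor(iterations=25, lr=0.01, log=False)  # name assumed
#     for intermediate in editor.generate("a smiling face", neg_prompts="glasses:0.5",
#                                         image_path="face.png", save_intermediate=True):
#         pass  # each yielded value is an intermediate PIL image
#     editor.make_animation(output_path="./animation.gif")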
| 24 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''beit'''
def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str:
super().__init__(**snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = use_mask_token
_UpperCAmelCase = use_absolute_position_embeddings
_UpperCAmelCase = use_relative_position_bias
_UpperCAmelCase = use_shared_relative_position_bias
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase = out_indices
_UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = semantic_loss_ignore_index
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
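        # absolute tolerance used when validating the ONNX export against PyTorch outputs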
return 1E-4
| 24 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=None , snake_case=True , ) -> Dict:
_UpperCAmelCase = size if size is not None else {'shortest_edge': 20}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_flip_channel_order
def lowerCamelCase_ ( self ) -> Any:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_center_crop": self.do_center_crop,
"crop_size": self.crop_size,
"do_flip_channel_order": self.do_flip_channel_order,
}
@require_torch
@require_vision
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MobileViTImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = MobileViTImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> Dict:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
self.assertTrue(hasattr(snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(snake_case , 'center_crop' ) )
self.assertTrue(hasattr(snake_case , 'do_flip_channel_order' ) )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 20} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Tuple:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self ) -> List[str]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self ) -> Dict:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 24 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
lowercase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowercase = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowercase = Counter()
for tk_ids in data:
counter.update(tk_ids)
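    # densify the Counter into a vocab_size-long list so that index == token id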
lowercase = [0] * args.vocab_size
for k, v in counter.items():
lowercase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
| 24 | 1 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowercase = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 |
"""simple docstring"""
from itertools import permutations
def UpperCAmelCase ( A : tuple ):
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
_UpperCAmelCase = [7, 11, 13, 17]
for i, test in enumerate(A ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def UpperCAmelCase ( A : int = 10 ):
'''simple docstring'''
return sum(
        int(''.join(map(str , A ) ) )
for num in permutations(range(A ) )
if is_substring_divisible(A ) )
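# Project Euler 43: for a 0-9 pandigital number d1..d10, each 3-digit window
# d2d3d4 .. d8d9d10 must be divisible by 2, 3, 5, 7, 11, 13, 17 in turn;
# the sum of all numbers with this property is 16695334890.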
if __name__ == "__main__":
print(F'''{solution() = }''')
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import OPTConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import GPTaTokenizer, TFOPTForCausalLM, TFOPTModel
def UpperCAmelCase ( A : Optional[int] , A : Any , A : str=None , A : Tuple=None ):
'''simple docstring'''
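    # build the attention mask from the pad tokens when the caller does not supply one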
if attention_mask is None:
_UpperCAmelCase = tf.cast(tf.math.not_equal(A , config.pad_token_id ) , tf.inta )
return {"input_ids": input_ids, "attention_mask": attention_mask}
@require_tf
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = OPTConfig
_UpperCAmelCase = {}
_UpperCAmelCase = '''gelu'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=False , snake_case=99 , snake_case=16 , snake_case=2 , snake_case=4 , snake_case=4 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=20 , snake_case=2 , snake_case=1 , snake_case=0 , snake_case=16 , snake_case=16 , ) -> Optional[int]:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = embed_dim
_UpperCAmelCase = word_embed_proj_dim
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
_UpperCAmelCase = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = tf.concat([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , embed_dim=self.embed_dim , word_embed_proj_dim=self.word_embed_proj_dim , is_encoder_decoder=snake_case , **self.config_updates , )
_UpperCAmelCase = prepare_opt_inputs_dict(snake_case , snake_case )
return config, inputs_dict
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = TFOPTModel(config=snake_case )
_UpperCAmelCase = inputs_dict['input_ids']
_UpperCAmelCase = input_ids[:1, :]
_UpperCAmelCase = inputs_dict['attention_mask'][:1, :]
_UpperCAmelCase = 1
# first forward pass
_UpperCAmelCase = model(snake_case , attention_mask=snake_case , use_cache=snake_case )
_UpperCAmelCase , _UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 3) , config.vocab_size )
_UpperCAmelCase = tf.cast(ids_tensor((self.batch_size, 3) , 2 ) , tf.inta )
# append to next input_ids and
_UpperCAmelCase = tf.concat([input_ids, next_tokens] , axis=-1 )
_UpperCAmelCase = tf.concat([attention_mask, next_attn_mask] , axis=-1 )
_UpperCAmelCase = model(snake_case , attention_mask=snake_case )[0]
_UpperCAmelCase = model(snake_case , attention_mask=snake_case , past_key_values=snake_case )[0]
self.parent.assertEqual(next_tokens.shape[1] , output_from_past.shape[1] )
# select random slice
_UpperCAmelCase = int(ids_tensor((1,) , output_from_past.shape[-1] ) )
_UpperCAmelCase = output_from_no_past[:, -3:, random_slice_idx]
_UpperCAmelCase = output_from_past[:, :, random_slice_idx]
# test that outputs are equal for slice
tf.debugging.assert_near(snake_case , snake_case , rtol=1E-3 )
@require_tf
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (TFOPTModel, TFOPTForCausalLM) if is_tf_available() else ()
_UpperCAmelCase = (TFOPTForCausalLM,) if is_tf_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': TFOPTModel, '''text-generation''': TFOPTForCausalLM} if is_tf_available() else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = 10
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = TFOPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(snake_case , snake_case ):
if hasattr(snake_case , 'weight' ):
return embedding_layer.weight
else:
# Here we build the word embeddings weights if not exists.
# And then we retry to get the attribute once built.
model.build()
if hasattr(snake_case , 'weight' ):
return embedding_layer.weight
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10]:
# build the embeddings
_UpperCAmelCase = model_class(config=snake_case )
_UpperCAmelCase = _get_word_embedding_weight(snake_case , model.get_input_embeddings() )
_UpperCAmelCase = _get_word_embedding_weight(snake_case , model.get_output_embeddings() )
# reshape the embeddings
model.resize_token_embeddings(snake_case )
_UpperCAmelCase = _get_word_embedding_weight(snake_case , model.get_input_embeddings() )
_UpperCAmelCase = _get_word_embedding_weight(snake_case , model.get_output_embeddings() )
# check that the resized embeddings size matches the desired size.
_UpperCAmelCase = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0] , snake_case )
# check that weights remain the same after resizing
_UpperCAmelCase = True
for pa, pa in zip(old_input_embeddings.value() , new_input_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_UpperCAmelCase = False
self.assertTrue(snake_case )
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0] , snake_case )
_UpperCAmelCase = True
for pa, pa in zip(old_output_embeddings.value() , new_output_embeddings.value() ):
if tf.math.reduce_sum(tf.math.abs(pa - pa ) ) > 0:
_UpperCAmelCase = False
self.assertTrue(snake_case )
def UpperCAmelCase ( A : Any ):
'''simple docstring'''
return tf.constant(A , dtype=tf.intaa )
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = 99
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = tf.ones((4, 1) , dtype=tf.intaa ) * 2
_UpperCAmelCase = tf.concat([ids_tensor((4, 6) , self.vocab_size - 3 ) + 3, eos_column_vector] , axis=1 )
_UpperCAmelCase = input_ids.shape[0]
_UpperCAmelCase = OPTConfig(
vocab_size=self.vocab_size , hidden_size=24 , num_hidden_layers=2 , num_attention_heads=2 , ffn_dim=32 , max_position_embeddings=48 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , )
return config, input_ids, batch_size
@require_sentencepiece
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = TFOPTModel.from_pretrained('facebook/opt-350m' )
_UpperCAmelCase = _long_tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
_UpperCAmelCase = tf.not_equal(snake_case , model.config.pad_token_id )
with tf.GradientTape():
_UpperCAmelCase = model(input_ids=snake_case , attention_mask=snake_case ).last_hidden_state
_UpperCAmelCase = (1, 11, 512)
self.assertEqual(output.shape , snake_case )
_UpperCAmelCase = tf.constant(
[[-0.2873, -1.9218, -0.3033], [-1.2710, -0.1338, -0.1902], [0.4095, 0.1214, -1.3121]] )
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case , atol=4E-3 ) )
_UpperCAmelCase = tf.function(snake_case , jit_compile=snake_case )
_UpperCAmelCase = xla_generate(snake_case , snake_case )[0]
self.assertTrue(np.allclose(output[:, :3, :3] , snake_case , atol=4E-2 ) )
@require_tf
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Any:
super().setUp()
_UpperCAmelCase = 'facebook/opt-350m'
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = TFOPTForCausalLM.from_pretrained(self.path_model )
_UpperCAmelCase = GPTaTokenizer.from_pretrained(self.path_model )
_UpperCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of',
'Paris is the capital of France and',
'Computers and mobile phones have taken',
]
# verify that prompt without BOS token is identical to Metaseq -> add_special_tokens=False
_UpperCAmelCase = tokenizer(snake_case , return_tensors='tf' , padding=snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = tf.math.reduce_mean(model(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
_UpperCAmelCase = tf.constant(
[
[1.3851, -13.8923, -10.5229, -10.7533, -0.2309, -10.2384, -0.5365, -9.0947, -5.1670],
[-4.7073, -10.6276, -3.9415, -21.5242, -0.2822, -0.2822, -0.2822, -0.2822, -0.2822],
[0.6247, -3.4229, -8.9179, -1.4297, -14.1650, 1.4146, -9.0218, -0.2703, -0.2703],
[6.4783, -1.9913, -10.7926, -2.3336, 1.5092, -0.9974, -6.8213, 1.3477, 1.3477],
] )
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-4 ) )
_UpperCAmelCase = tf.function(snake_case , jit_compile=snake_case )
_UpperCAmelCase = tf.math.reduce_mean(xla_generate(inputs.input_ids , attention_mask=inputs.attention_mask )[0] , axis=-1 )
self.assertTrue(np.allclose(snake_case , snake_case , atol=1E-4 ) )
@require_tf
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@property
def lowerCamelCase_ ( self ) -> str:
return [
"Today is a beautiful day and I want",
"In the city of",
"Paris is the capital of France and",
"Computers and mobile phones have taken",
]
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = 'facebook/opt-125m'
_UpperCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of New York, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_UpperCAmelCase = []
_UpperCAmelCase = GPTaTokenizer.from_pretrained(snake_case )
_UpperCAmelCase = TFOPTForCausalLM.from_pretrained(snake_case )
for prompt in self.prompts:
_UpperCAmelCase = tokenizer(snake_case , return_tensors='tf' ).input_ids
_UpperCAmelCase = model.generate(snake_case , max_length=10 )
_UpperCAmelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
predicted_outputs += generated_string
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = 'facebook/opt-350m'
_UpperCAmelCase = GPTaTokenizer.from_pretrained(snake_case )
_UpperCAmelCase = TFOPTForCausalLM.from_pretrained(snake_case )
_UpperCAmelCase = 'left'
# use different length sentences to test batching
_UpperCAmelCase = [
'Hello, my dog is a little',
'Today, I',
]
_UpperCAmelCase = tokenizer(snake_case , return_tensors='tf' , padding=snake_case )
_UpperCAmelCase = inputs['input_ids']
_UpperCAmelCase = model.generate(input_ids=snake_case , attention_mask=inputs['attention_mask'] )
_UpperCAmelCase = tokenizer(sentences[0] , return_tensors='tf' ).input_ids
_UpperCAmelCase = model.generate(input_ids=snake_case )
_UpperCAmelCase = inputs_non_padded.shape[-1] - tf.math.reduce_sum(
tf.cast(inputs['attention_mask'][-1] , tf.intaa ) )
_UpperCAmelCase = tokenizer(sentences[1] , return_tensors='tf' ).input_ids
_UpperCAmelCase = model.generate(input_ids=snake_case , max_length=model.config.max_length - num_paddings )
_UpperCAmelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
_UpperCAmelCase = tokenizer.decode(output_non_padded[0] , skip_special_tokens=snake_case )
_UpperCAmelCase = tokenizer.decode(output_padded[0] , skip_special_tokens=snake_case )
_UpperCAmelCase = [
'Hello, my dog is a little bit of a dork.\nI\'m a little bit',
'Today, I was in the middle of a conversation with a friend about the',
]
self.assertListEqual(snake_case , snake_case )
self.assertListEqual(snake_case , [non_padded_sentence, padded_sentence] )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = 'facebook/opt-350m'
_UpperCAmelCase = [
'Today is a beautiful day and I want to',
'In the city of San Francisco, the city',
'Paris is the capital of France and the capital',
'Computers and mobile phones have taken over the',
]
_UpperCAmelCase = []
_UpperCAmelCase = GPTaTokenizer.from_pretrained(snake_case )
_UpperCAmelCase = TFOPTForCausalLM.from_pretrained(snake_case )
for prompt in self.prompts:
_UpperCAmelCase = tokenizer(snake_case , return_tensors='tf' ).input_ids
_UpperCAmelCase = model.generate(snake_case , max_length=10 )
_UpperCAmelCase = tokenizer.batch_decode(snake_case , skip_special_tokens=snake_case )
predicted_outputs += generated_string
self.assertListEqual(snake_case , snake_case )
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 1 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def UpperCAmelCase ( A : str , A : complex , A : str = "x" , A : float = 10**-10 , A : int = 1 , ):
'''simple docstring'''
_UpperCAmelCase = symbols(A )
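    # lambdify turns the symbolic expression and its derivative into fast numeric callables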
_UpperCAmelCase = lambdify(A , A )
_UpperCAmelCase = lambdify(A , diff(A , A ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(A ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(A ) / diff_function(
A )
else:
raise ZeroDivisionError('Could not find root' ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F'''The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}''')
# Find root of polynomial
# Find fourth Root of 5
print(F'''The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5J)}''')
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F'''{newton_raphson('log(y) - 1', 2, variable='y')}''',
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F'''{newton_raphson('exp(x) - 1', 10, precision=0.005)}''',
)
# Find root of cos(x)
print(F'''The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}''')
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 1 |
"""simple docstring"""
import math
import os
import sys
def UpperCAmelCase ( A : str ):
'''simple docstring'''
_UpperCAmelCase = ''
try:
with open(A , 'rb' ) as binary_file:
_UpperCAmelCase = binary_file.read()
for dat in data:
_UpperCAmelCase = f'{dat:08b}'
result += curr_byte
return result
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase ( A : dict[str, str] , A : str , A : int , A : str ):
'''simple docstring'''
lexicon.pop(A )
_UpperCAmelCase = last_match_id
if math.loga(A ).is_integer():
for curr_key in lexicon:
_UpperCAmelCase = '0' + lexicon[curr_key]
_UpperCAmelCase = bin(A )[2:]
def UpperCAmelCase ( A : str ):
'''simple docstring'''
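    # LZW-style pass over the bit string: keep extending the current window and,
    # whenever it matches a lexicon entry, emit that entry's code and grow the lexicon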
_UpperCAmelCase = {'0': '0', '1': '1'}
_UpperCAmelCase , _UpperCAmelCase = '', ''
_UpperCAmelCase = len(A )
for i in range(len(A ) ):
curr_string += data_bits[i]
if curr_string not in lexicon:
continue
_UpperCAmelCase = lexicon[curr_string]
result += last_match_id
add_key_to_lexicon(A , A , A , A )
index += 1
_UpperCAmelCase = ''
while curr_string != "" and curr_string not in lexicon:
curr_string += "0"
if curr_string != "":
_UpperCAmelCase = lexicon[curr_string]
result += last_match_id
return result
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = os.path.getsize(A )
_UpperCAmelCase = bin(A )[2:]
_UpperCAmelCase = len(A )
return "0" * (length_length - 1) + file_length_binary + compressed
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = 8
try:
with open(A , 'wb' ) as opened_file:
_UpperCAmelCase = [
to_write[i : i + byte_length]
for i in range(0 , len(A ) , A )
]
if len(result_byte_array[-1] ) % byte_length == 0:
result_byte_array.append('10000000' )
else:
result_byte_array[-1] += "1" + "0" * (
byte_length - len(result_byte_array[-1] ) - 1
)
for elem in result_byte_array:
opened_file.write(int(A , 2 ).to_bytes(1 , byteorder='big' ) )
except OSError:
print('File not accessible' )
sys.exit()
def UpperCAmelCase ( A : str , A : str ):
'''simple docstring'''
_UpperCAmelCase = read_file_binary(A )
_UpperCAmelCase = compress_data(A )
_UpperCAmelCase = add_file_length(A , A )
write_file_binary(A , A )
if __name__ == "__main__":
compress(sys.argv[1], sys.argv[2])
| 24 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    """Configuration class for Swin Transformer models."""

    model_type = 'swin'

    attribute_map = {
        'num_attention_heads': 'num_heads',
        'num_hidden_layers': 'num_layers',
    }

    def __init__( self , image_size=224 , patch_size=4 , num_channels=3 , embed_dim=96 , depths=[2, 2, 6, 2] , num_heads=[3, 6, 12, 24] , window_size=7 , mlp_ratio=4.0 , qkv_bias=True , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , drop_path_rate=0.1 , hidden_act="gelu" , use_absolute_embeddings=False , initializer_range=0.02 , layer_norm_eps=1E-5 , encoder_stride=32 , out_features=None , out_indices=None , **kwargs , ):
        super().__init__(**kwargs)
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ['stem'] + [f'stage{idx}' for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse('1.11')

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
            ] )

    @property
    def atol_for_validation(self) -> float:
        return 1E-4
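# Usage sketch (assumes the `transformers` package is installed; the numbers
# follow the defaults above): the derived hidden size is
# embed_dim * 2**(len(depths) - 1).
def _demo_swin_config() -> None:
    config = SwinConfig()
    assert config.hidden_size == 96 * 2**3 == 768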
| 24 | 1 |
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of n."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of n."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of n via its string form."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time each implementation on progressively larger inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(A , A )
print()
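# Quick agreement check (illustrative, not in the original file): the three
# implementations return the same value, including for negative inputs.
def _check_sum_of_digits_agree() -> None:
    for n in (0, 7, -7, 12345, 10**12 + 1):
        assert sum_of_digits(n) == sum_of_digits_recursion(n) == sum_of_digits_compact(n)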
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
'''simple docstring'''
    def __init__( self , num_attention_heads: int = 16 , attention_head_dim: int = 88 , in_channels: Optional[int] = None , num_layers: int = 1 , dropout: float = 0.0 , norm_num_groups: int = 32 , cross_attention_dim: Optional[int] = None , attention_bias: bool = False , sample_size: Optional[int] = None , num_vector_embeds: Optional[int] = None , activation_fn: str = "geglu" , num_embeds_ada_norm: Optional[int] = None , ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads , attention_head_dim=attention_head_dim , in_channels=in_channels , num_layers=num_layers , dropout=dropout , norm_num_groups=norm_num_groups , cross_attention_dim=cross_attention_dim , attention_bias=attention_bias , sample_size=sample_size , num_vector_embeds=num_vector_embeds , activation_fn=activation_fn , num_embeds_ada_norm=num_embeds_ada_norm , )
                for _ in range(2)
            ] )
        # Variables that can be set by a pipeline:
        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5
        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]
        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward( self , hidden_states , encoder_hidden_states , timestep=None , attention_mask=None , cross_attention_kwargs=None , return_dict: bool = True , ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return Transformer2DModelOutput(sample=output_states)
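# Usage sketch (assumption: `model` is a constructed DualTransformer2DModel):
# the mixing weight can be re-balanced per inference call without retraining.
def _set_mix_ratio(model: "DualTransformer2DModel", ratio: float) -> None:
    assert 0.0 <= ratio <= 1.0, "mix_ratio interpolates between the two experts"
    model.mix_ratio = ratio  # e.g. 0.7 weights the first transformer's residual more heavily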
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
def allocation_num(number_of_bytes: int, partitions: int) -> list[str]:
    """Split `number_of_bytes` into `partitions` contiguous, 1-indexed byte ranges."""
    if partitions <= 0:
        raise ValueError('partitions must be a positive number!')
    if partitions > number_of_bytes:
        raise ValueError('partitions can not > number_of_bytes!')
    bytes_per_partition = number_of_bytes // partitions
    allocation_list = []
    for i in range(partitions):
        start_bytes = i * bytes_per_partition + 1
        end_bytes = (
            number_of_bytes if i == partitions - 1 else (i + 1) * bytes_per_partition
        )
        allocation_list.append(f'{start_bytes}-{end_bytes}')
    return allocation_list
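# Example (illustrative numbers, not in the original file): splitting a 100-byte
# download across three workers yields ranges usable directly in HTTP Range
# requests; the last partition absorbs the remainder.
def _demo_allocation_num() -> None:
    assert allocation_num(100, 3) == ['1-33', '34-66', '67-100']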
if __name__ == "__main__":
import doctest
doctest.testmod()
| 24 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class CvtConfigTester(ConfigTester):
    def create_and_test_config_common_properties(self):
        config = self.config_class(**self.inputs_dict)
        self.parent.assertTrue(hasattr(config, 'embed_dim'))
        self.parent.assertTrue(hasattr(config, 'num_heads'))
class CvtModelTester:
    def __init__( self , parent , batch_size=13 , image_size=64 , num_channels=3 , embed_dim=[16, 48, 96] , num_heads=[1, 3, 6] , depth=[1, 2, 10] , patch_sizes=[7, 3, 3] , patch_stride=[4, 2, 2] , patch_padding=[2, 1, 1] , stride_kv=[2, 2, 2] , cls_token=[False, False, True] , attention_drop_rate=[0.0, 0.0, 0.0] , initializer_range=0.02 , layer_norm_eps=1E-12 , is_training=True , use_labels=True , num_labels=2 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_sizes = patch_sizes
        self.patch_stride = patch_stride
        self.patch_padding = patch_padding
        self.is_training = is_training
        self.use_labels = use_labels
        self.num_labels = num_labels
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.stride_kv = stride_kv
        self.depth = depth
        self.cls_token = cls_token
        self.attention_drop_rate = attention_drop_rate
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])
        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.num_labels)
        config = self.get_config()
        return config, pixel_values, labels

    def get_config(self):
        return CvtConfig(
            image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )

    def create_and_check_model(self, config, pixel_values, labels):
        model = CvtModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        image_size = (self.image_size, self.image_size)
        height, width = image_size[0], image_size[1]
        for i in range(len(self.depth)):
            height = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
            width = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.embed_dim[-1], height, width))

    def create_and_check_for_image_classification(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = CvtForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
    def setUp(self):
        self.model_tester = CvtModelTester(self)
        self.config_tester = ConfigTester(self, config_class=CvtConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def create_and_test_config_common_properties(self):
        return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCamelCase_ ( self ) -> int:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
def check_hidden_states_output(snake_case , snake_case , snake_case ):
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(snake_case ) , snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ) -> Dict:
pass
@slow
def lowerCamelCase_ ( self ) -> Dict:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = CvtModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def prepare_img():
    """Load the COCO test fixture used by the integration test below."""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png')
    return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    @cached_property
    def default_image_processor(self):
        return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0])
    @slow
    def test_inference_image_classification_head(self):
        model = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0]).to(torch_device)
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image, return_tensors='pt').to(torch_device)
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
        # verify the logits
        expected_shape = torch.Size((1, 1000))
        self.assertEqual(outputs.logits.shape, expected_shape)
        expected_slice = torch.tensor([0.9285, 0.9015, -0.3150]).to(torch_device)
        self.assertTrue(torch.allclose(outputs.logits[0, :3], expected_slice, atol=1E-4))
| 24 | 1 |
"""simple docstring"""
import os
import re
import shutil
from argparse import ArgumentParser, Namespace
from datasets.commands import BaseDatasetsCLICommand
from datasets.utils.logging import get_logger
HIGHLIGHT_MESSAGE_PRE = '''<<<<<<< This should probably be modified because it mentions: '''
HIGHLIGHT_MESSAGE_POST = '''=======
>>>>>>>
'''
TO_HIGHLIGHT = [
'''TextEncoderConfig''',
'''ByteTextEncoder''',
'''SubwordTextEncoder''',
'''encoder_config''',
'''maybe_build_from_corpus''',
'''manual_dir''',
]
TO_CONVERT = [
# (pattern, replacement)
# Order is important here for some replacements
(r'''tfds\.core''', r'''datasets'''),
(r'''tf\.io\.gfile\.GFile''', r'''open'''),
(r'''tf\.([\w\d]+)''', r'''datasets.Value(\'\1\')'''),
(r'''tfds\.features\.Text\(\)''', r'''datasets.Value(\'string\')'''),
(r'''tfds\.features\.Text\(''', r'''datasets.Value(\'string\'),'''),
(r'''features\s*=\s*tfds.features.FeaturesDict\(''', r'''features=datasets.Features('''),
(r'''tfds\.features\.FeaturesDict\(''', r'''dict('''),
(r'''The TensorFlow Datasets Authors''', r'''The TensorFlow Datasets Authors and the HuggingFace Datasets Authors'''),
(r'''tfds\.''', r'''datasets.'''),
(r'''dl_manager\.manual_dir''', r'''self.config.data_dir'''),
(r'''self\.builder_config''', r'''self.config'''),
]
def convert_command_factory(args: Namespace):
    """Factory used by the argument parser to instantiate the command."""
    return ConvertCommand(args.tfds_path, args.datasets_directory)
class ConvertCommand(BaseDatasetsCLICommand):
    @staticmethod
    def register_subcommand(parser):
        train_parser = parser.add_parser(
            'convert' , help='Convert a TensorFlow Datasets dataset to a HuggingFace Datasets dataset.' , )
train_parser.add_argument(
'--tfds_path' , type=snake_case , required=snake_case , help='Path to a TensorFlow Datasets folder to convert or a single tfds file to convert.' , )
train_parser.add_argument(
'--datasets_directory' , type=snake_case , required=snake_case , help='Path to the HuggingFace Datasets folder.' )
train_parser.set_defaults(func=snake_case )
    def __init__(self, tfds_path: str, datasets_directory: str, *args):
        self._logger = get_logger('datasets-cli/converting')
        self._tfds_path = tfds_path
        self._datasets_directory = datasets_directory
    def run(self):
        if os.path.isdir(self._tfds_path):
            abs_tfds_path = os.path.abspath(self._tfds_path)
        elif os.path.isfile(self._tfds_path):
            abs_tfds_path = os.path.dirname(self._tfds_path)
        else:
            raise ValueError('--tfds_path is neither a directory nor a file. Please check path.')
        abs_datasets_path = os.path.abspath(self._datasets_directory)
        self._logger.info(f'Converting datasets from {abs_tfds_path} to {abs_datasets_path}')
        utils_files = []
        with_manual_update = []
        imports_to_builder_map = {}
if os.path.isdir(self._tfds_path ):
_UpperCAmelCase = os.listdir(snake_case )
else:
_UpperCAmelCase = [os.path.basename(self._tfds_path )]
for f_name in file_names:
self._logger.info(f'Looking at file {f_name}' )
_UpperCAmelCase = os.path.join(snake_case , snake_case )
_UpperCAmelCase = os.path.join(snake_case , snake_case )
if not os.path.isfile(snake_case ) or "__init__" in f_name or "_test" in f_name or ".py" not in f_name:
self._logger.info('Skipping file' )
continue
with open(snake_case , encoding='utf-8' ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = []
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = []
for line in lines:
_UpperCAmelCase = line
# Convert imports
if "import tensorflow.compat.v2 as tf" in out_line:
continue
elif "@tfds.core" in out_line:
continue
elif "builder=self" in out_line:
continue
elif "import tensorflow_datasets.public_api as tfds" in out_line:
_UpperCAmelCase = 'import datasets\n'
elif "import tensorflow" in out_line:
# order is important here
_UpperCAmelCase = ''
continue
elif "from absl import logging" in out_line:
_UpperCAmelCase = 'from datasets import logging\n'
elif "getLogger" in out_line:
_UpperCAmelCase = out_line.replace('getLogger' , 'get_logger' )
elif any(expression in out_line for expression in TO_HIGHLIGHT ):
_UpperCAmelCase = True
_UpperCAmelCase = list(filter(lambda snake_case : e in out_line , snake_case ) )
out_lines.append(HIGHLIGHT_MESSAGE_PRE + str(snake_case ) + '\n' )
out_lines.append(snake_case )
out_lines.append(snake_case )
continue
else:
for pattern, replacement in TO_CONVERT:
_UpperCAmelCase = re.sub(snake_case , snake_case , snake_case )
# Take care of saving utilities (to later move them together with main script)
if "tensorflow_datasets" in out_line:
_UpperCAmelCase = re.match(r'from\stensorflow_datasets.*import\s([^\.\r\n]+)' , snake_case )
tfds_imports.extend(imp.strip() for imp in match.group(1 ).split(',' ) )
_UpperCAmelCase = 'from . import ' + match.group(1 )
# Check we have not forget anything
if "tf." in out_line or "tfds." in out_line or "tensorflow_datasets" in out_line:
raise ValueError(f'Error converting {out_line.strip()}' )
if "GeneratorBasedBuilder" in out_line or "BeamBasedBuilder" in out_line:
_UpperCAmelCase = True
out_lines.append(snake_case )
if is_builder or "wmt" in f_name:
# We create a new directory for each dataset
_UpperCAmelCase = f_name.replace('.py' , '' )
_UpperCAmelCase = os.path.join(snake_case , snake_case )
_UpperCAmelCase = os.path.join(snake_case , snake_case )
os.makedirs(snake_case , exist_ok=snake_case )
self._logger.info(f'Adding directory {output_dir}' )
imports_to_builder_map.update({imp: output_dir for imp in tfds_imports} )
else:
# Utilities will be moved at the end
utils_files.append(snake_case )
if needs_manual_update:
with_manual_update.append(snake_case )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.writelines(snake_case )
self._logger.info(f'Converted in {output_file}' )
for utils_file in utils_files:
try:
_UpperCAmelCase = os.path.basename(snake_case )
_UpperCAmelCase = imports_to_builder_map[f_name.replace('.py' , '' )]
self._logger.info(f'Moving {dest_folder} to {utils_file}' )
shutil.copy(snake_case , snake_case )
except KeyError:
self._logger.error(f'Cannot find destination folder for {utils_file}. Please copy manually.' )
if with_manual_update:
for file_path in with_manual_update:
self._logger.warning(
f'You need to manually update file {file_path} to remove configurations using \'TextEncoderConfig\'.' )
| 24 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return both roots of a*x^2 + b*x + c = 0; purely real roots come back as floats."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )
def main():
    solution1, solution2 = quadratic_roots(a=5, b=6, c=1)
    print(f'The solutions are: {solution1} and {solution2}')
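# Worked example (not in the original file): x^2 - 3x + 2 = 0 factors as
# (x - 1)(x - 2), so both roots are real and are returned as plain floats.
def _demo_quadratic_roots() -> None:
    assert quadratic_roots(1, -3, 2) == (2.0, 1.0)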
if __name__ == "__main__":
main()
| 24 | 1 |
"""simple docstring"""
lowercase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
lowercase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
lowercase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
| 24 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = BarthezTokenizer
_UpperCAmelCase = BarthezTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
    def setUp(self):
        super().setUp()
        tokenizer = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez')
        tokenizer.save_pretrained(self.tmpdirname)
        tokenizer.save_pretrained(self.tmpdirname, legacy_format=False)
        self.tokenizer = tokenizer
    def test_convert_token_and_id(self):
        token = '<pad>'
        token_id = 1
        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token), token_id)
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id), token)
def lowerCamelCase_ ( self ) -> List[str]:
        vocab_keys = list(self.get_tokenizer().get_vocab().keys())
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case ) , 101122 )
def lowerCamelCase_ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(snake_case )
_UpperCAmelCase = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
# fmt: off
_UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
| 24 | 1 |
"""simple docstring"""
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def UpperCAmelCase ( A : int , A : int , A : Any ):
'''simple docstring'''
_UpperCAmelCase = AlbertConfig.from_json_file(A )
print(f'Building PyTorch model from configuration: {config}' )
_UpperCAmelCase = AlbertForPreTraining(A )
# Load weights from tf checkpoint
load_tf_weights_in_albert(A , A , A )
# Save pytorch-model
print(f'Save PyTorch model to {pytorch_dump_path}' )
torch.save(model.state_dict() , A )
if __name__ == "__main__":
lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--albert_config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained ALBERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
| 24 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
        transformer = Transformer2DModel(
            sample_size=16 , num_layers=2 , patch_size=4 , attention_head_dim=8 , num_attention_heads=2 , in_channels=4 , out_channels=8 , attention_bias=True , activation_fn='gelu-approximate' , num_embeds_ada_norm=1000 , norm_type='ada_norm_zero' , norm_elementwise_affine=False , )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {'transformer': transformer.eval(), 'vae': vae.eval(), 'scheduler': scheduler}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Optional[Any]:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'class_labels': [1],
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1E-3 )
def lowerCamelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 24 | 1 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)
DatasetType = TypeVar('DatasetType', Dataset, IterableDataset)
def interleave_datasets( datasets: List[DatasetType] , probabilities: Optional[List[float]] = None , seed: Optional[int] = None , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ) -> DatasetType:
    """Interleave several map-style or iterable datasets into a single dataset."""
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset
    if not datasets:
        raise ValueError('Unable to interleave an empty list of datasets.')
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']')
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.')
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.')
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.')
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy)
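# Usage sketch (assumption: this module is the public `datasets.combine`, so the
# function above is exposed as `datasets.interleave_datasets`). Without
# probabilities, rows alternate cyclically until the first dataset is exhausted.
def _demo_interleave() -> None:
    from datasets import Dataset, interleave_datasets

    d1 = Dataset.from_dict({'x': [0, 1, 2]})
    d2 = Dataset.from_dict({'x': [10, 11, 12]})
    mixed = interleave_datasets([d1, d2])
    assert mixed['x'] == [0, 10, 1, 11, 2, 12]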
def concatenate_datasets( dsets: List[DatasetType] , info: Optional[DatasetInfo] = None , split: Optional[NamedSplit] = None , axis: int = 0 , ) -> DatasetType:
    """Concatenate several map-style or iterable datasets into a single dataset."""
    if not dsets:
        raise ValueError('Unable to concatenate an empty list of datasets.')
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
                        'is an empty dataset dictionary.')
                raise ValueError(
                    f'Dataset at position {i} has at least one split: {list(dataset)}\n'
                    f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(dataset))}\']')
            raise ValueError(
                f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}.')
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.')
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
| 24 |
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of n."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of n."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of n via its string form."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time each implementation on progressively larger inputs."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup='import __main__')
        print(f'{call:56} = {func(value)} -- {timing:.4f} seconds')
for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
benchmark_a_function(A , A )
print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
| 24 | 1 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNet2DModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def _preprocess_image(image: Union[List, PIL.Image.Image, torch.Tensor]):
    warnings.warn(
        'The preprocess method is deprecated and will be removed in a future version. Please'
        ' use VaeImageProcessor.preprocess instead' , FutureWarning , )
    if isinstance(image, torch.Tensor):
        return image
    elif isinstance(image, PIL.Image.Image):
        image = [image]
    if isinstance(image[0], PIL.Image.Image):
        w, h = image[0].size
        w, h = (x - x % 8 for x in (w, h))  # resize to integer multiple of 8
        image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION['lanczos']))[None, :] for i in image]
        image = np.concatenate(image, axis=0)
        image = np.array(image).astype(np.float32) / 255.0
        image = image.transpose(0, 3, 1, 2)
        image = 2.0 * image - 1.0
        image = torch.from_numpy(image)
    elif isinstance(image[0], torch.Tensor):
        image = torch.cat(image, dim=0)
    return image
def _preprocess_mask(mask: Union[List, PIL.Image.Image, torch.Tensor]):
    if isinstance(mask, torch.Tensor):
        return mask
    elif isinstance(mask, PIL.Image.Image):
        mask = [mask]
    if isinstance(mask[0], PIL.Image.Image):
        w, h = mask[0].size
        w, h = (x - x % 32 for x in (w, h))  # resize to integer multiple of 32
        mask = [np.array(m.convert('L').resize((w, h), resample=PIL_INTERPOLATION['nearest']))[None, :] for m in mask]
        mask = np.concatenate(mask, axis=0)
        mask = mask.astype(np.float32) / 255.0
        mask[mask < 0.5] = 0
        mask[mask >= 0.5] = 1
        mask = torch.from_numpy(mask)
    elif isinstance(mask[0], torch.Tensor):
        mask = torch.cat(mask, dim=0)
    return mask
class RePaintPipeline(DiffusionPipeline):
    """Pipeline for image inpainting using the RePaint resampling schedule."""

    unet: UNet2DModel
    scheduler: RePaintScheduler

    def __init__(self, unet, scheduler):
        super().__init__()
        self.register_modules(unet=unet, scheduler=scheduler)
@torch.no_grad()
    def __call__( self , image , mask_image , num_inference_steps = 250 , eta = 0.0 , jump_length = 10 , jump_n_sample = 10 , generator = None , output_type = "pil" , return_dict = True , ) -> Union[ImagePipelineOutput, Tuple]:
        original_image = image
        original_image = _preprocess_image(original_image)
        original_image = original_image.to(device=self.device, dtype=self.unet.dtype)
        mask_image = _preprocess_mask(mask_image)
        mask_image = mask_image.to(device=self.device, dtype=self.unet.dtype)
        batch_size = original_image.shape[0]
        # sample gaussian noise to begin the loop
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f'You have passed a list of generators of length {len(generator)}, but requested an effective batch'
                f' size of {batch_size}. Make sure the batch size matches the length of the generators.')
        image_shape = original_image.shape
        image = randn_tensor(image_shape, generator=generator, device=self.device, dtype=self.unet.dtype)
        # set step values
        self.scheduler.set_timesteps(num_inference_steps, jump_length, jump_n_sample, self.device)
        self.scheduler.eta = eta
        t_last = self.scheduler.timesteps[0] + 1
        generator = generator[0] if isinstance(generator, list) else generator
        for i, t in enumerate(self.progress_bar(self.scheduler.timesteps)):
            if t < t_last:
                # predict the noise residual
                model_output = self.unet(image, t).sample
                # compute previous image: x_t -> x_t-1
                image = self.scheduler.step(model_output, t, image, original_image, mask_image, generator).prev_sample
            else:
                # compute the reverse: x_t-1 -> x_t
                image = self.scheduler.undo_step(image, t_last, generator)
            t_last = t
        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()
        if output_type == "pil":
            image = self.numpy_to_pil(image)
        if not return_dict:
            return (image,)
        return ImagePipelineOutput(images=image)
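# Usage sketch (assumptions: a RePaint-compatible checkpoint such as the one
# used in the diffusers docs; `original` and `mask` are PIL images of matching
# size; a CUDA device is available):
def _demo_repaint(original: 'PIL.Image.Image', mask: 'PIL.Image.Image'):
    scheduler = RePaintScheduler.from_pretrained('google/ddpm-ema-celebahq-256')
    pipe = RePaintPipeline.from_pretrained('google/ddpm-ema-celebahq-256', scheduler=scheduler).to('cuda')
    result = pipe(image=original, mask_image=mask, num_inference_steps=250, jump_length=10, jump_n_sample=10)
    return result.images[0]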
| 24 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    """Return every k-element combination of the integers 1..n."""
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result
def create_all_state(increment: int, total_number: int, level: int, current_list: list[int], total_list: list[list[int]]) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()
def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)
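# Worked example (not in the original file): choosing 2 of {1, 2, 3} yields the
# three combinations in lexicographic order.
def _demo_generate_all_combinations() -> None:
    assert generate_all_combinations(3, 2) == [[1, 2], [1, 3], [2, 3]]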
if __name__ == "__main__":
lowercase = 4
lowercase = 2
lowercase = generate_all_combinations(n, k)
print_all_state(total_list)
| 24 | 1 |
"""simple docstring"""
import json
import os
from functools import lru_cache
from typing import List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.json', 'merges_file': 'merges.txt'}
# See all BART models at https://huggingface.co/models?filter=bart
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/vocab.json''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/vocab.json''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/vocab.json''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/vocab.json''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/vocab.json''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/vocab.json''',
},
'''merges_file''': {
'''facebook/bart-base''': '''https://huggingface.co/facebook/bart-base/resolve/main/merges.txt''',
'''facebook/bart-large''': '''https://huggingface.co/facebook/bart-large/resolve/main/merges.txt''',
'''facebook/bart-large-mnli''': '''https://huggingface.co/facebook/bart-large-mnli/resolve/main/merges.txt''',
'''facebook/bart-large-cnn''': '''https://huggingface.co/facebook/bart-large-cnn/resolve/main/merges.txt''',
'''facebook/bart-large-xsum''': '''https://huggingface.co/facebook/bart-large-xsum/resolve/main/merges.txt''',
'''yjernite/bart_eli5''': '''https://huggingface.co/yjernite/bart_eli5/resolve/main/merges.txt''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''facebook/bart-base''': 10_24,
'''facebook/bart-large''': 10_24,
'''facebook/bart-large-mnli''': 10_24,
'''facebook/bart-large-cnn''': 10_24,
'''facebook/bart-large-xsum''': 10_24,
'''yjernite/bart_eli5''': 10_24,
}
@lru_cache()
def bytes_to_unicode():
    """Map every byte value (0-255) to a printable unicode character for byte-level BPE."""
    bs = (
        list(range(ord('!'), ord('~') + 1)) + list(range(ord('¡'), ord('¬') + 1)) + list(range(ord('®'), ord('ÿ') + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
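# Illustrative property check (not in the original file): the byte-to-unicode
# map is a bijection over all 256 byte values, which is what lets any UTF-8
# byte sequence round-trip through the tokenizer losslessly.
def _check_bytes_to_unicode() -> None:
    table = bytes_to_unicode()
    assert len(table) == 256 and len(set(table.values())) == 256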
def get_pairs(word):
    """Return the set of adjacent symbol pairs in `word` (a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class BartTokenizer(PreTrainedTokenizer):
    """Byte-level BPE tokenizer for BART (GPT-2 style)."""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
def __init__( self , snake_case , snake_case , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , **snake_case , ) -> Optional[int]:
_UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else bos_token
_UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else eos_token
_UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else sep_token
_UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else cls_token
_UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else unk_token
_UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
_UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else mask_token
super().__init__(
errors=snake_case , bos_token=snake_case , eos_token=snake_case , unk_token=snake_case , sep_token=snake_case , cls_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , **snake_case , )
with open(snake_case , encoding='utf-8' ) as vocab_handle:
_UpperCAmelCase = json.load(snake_case )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
_UpperCAmelCase = errors # how to handle errors in decoding
_UpperCAmelCase = bytes_to_unicode()
_UpperCAmelCase = {v: k for k, v in self.byte_encoder.items()}
with open(snake_case , encoding='utf-8' ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split('\n' )[1:-1]
_UpperCAmelCase = [tuple(merge.split() ) for merge in bpe_merges]
_UpperCAmelCase = dict(zip(snake_case , range(len(snake_case ) ) ) )
_UpperCAmelCase = {}
_UpperCAmelCase = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
_UpperCAmelCase = re.compile(r'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
def lowerCamelCase_ ( self ) -> Optional[int]:
return len(self.encoder )
def lowerCamelCase_ ( self ) -> List[Any]:
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token
        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float('inf')))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j
                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = ' '.join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = ''.join(
                self.byte_encoder[b] for b in token.encode('utf-8') )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(' '))
        return bpe_tokens
def lowerCamelCase_ ( self , snake_case ) -> List[str]:
return self.encoder.get(snake_case , self.encoder.get(self.unk_token ) )
def lowerCamelCase_ ( self , snake_case ) -> List[str]:
return self.decoder.get(snake_case )
def lowerCamelCase_ ( self , snake_case ) -> int:
_UpperCAmelCase = ''.join(snake_case )
_UpperCAmelCase = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> Tuple[str]:
if not os.path.isdir(snake_case ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
_UpperCAmelCase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
_UpperCAmelCase = os.path.join(
snake_case , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(snake_case , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=snake_case , ensure_ascii=snake_case ) + '\n' )
_UpperCAmelCase = 0
with open(snake_case , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
_UpperCAmelCase = token_index
writer.write(' '.join(snake_case ) + '\n' )
index += 1
return vocab_file, merge_file
def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
_UpperCAmelCase = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def lowerCamelCase_ ( self , snake_case , snake_case = None , snake_case = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=snake_case , token_ids_a=snake_case , already_has_special_tokens=snake_case )
if token_ids_a is None:
return [1] + ([0] * len(snake_case )) + [1]
return [1] + ([0] * len(snake_case )) + [1, 1] + ([0] * len(snake_case )) + [1]
def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[int]:
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self , snake_case , snake_case=False , **snake_case ) -> List[str]:
_UpperCAmelCase = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(snake_case ) > 0 and not text[0].isspace()):
_UpperCAmelCase = ' ' + text
return (text, kwargs)
| 24 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def _dump_articles(path: Path, articles: list):
    """Write one article per line to `path`."""
    content = '\n'.join(articles)
    Path(path).open('w').writelines(content)
T5_TINY = 'patrickvonplaten/t5-tiny-random'
BART_TINY = 'sshleifer/bart-tiny-random'
MBART_TINY = 'sshleifer/tiny-mbart'
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
    def run_eval_tester(self, model):
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
    def test_run_eval(self):
        self.run_eval_tester(T5_TINY)
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
from collections import deque
from collections.abc import Sequence
from dataclasses import dataclass
from typing import Any
@dataclass
class Node:
    """A binary-tree node holding an integer payload."""

    data: int
    left: Node | None = None
    right: Node | None = None
def make_tree() -> Node | None:
    """Build the sample tree 1(2(4, 5), 3) used by the demos below."""
    tree = Node(1)
    tree.left = Node(2)
    tree.right = Node(3)
    tree.left.left = Node(4)
    tree.left.right = Node(5)
    return tree
def preorder(root: Node | None) -> list[int]:
    """Root -> left subtree -> right subtree."""
    return [root.data, *preorder(root.left), *preorder(root.right)] if root else []
def postorder(root: Node | None) -> list[int]:
    """Left subtree -> right subtree -> root."""
    return postorder(root.left) + postorder(root.right) + [root.data] if root else []
def inorder(root: Node | None) -> list[int]:
    """Left subtree -> root -> right subtree."""
    return [*inorder(root.left), root.data, *inorder(root.right)] if root else []
def height(root: Node | None) -> int:
    """Number of nodes on the longest root-to-leaf path."""
    return (max(height(root.left), height(root.right)) + 1) if root else 0
def UpperCAmelCase ( A : Node | None ):
'''simple docstring'''
_UpperCAmelCase = []
if root is None:
return output
_UpperCAmelCase = deque([root] )
while process_queue:
_UpperCAmelCase = process_queue.popleft()
output.append(node.data )
if node.left:
process_queue.append(node.left )
if node.right:
process_queue.append(node.right )
return output
def UpperCAmelCase ( A : Node | None , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
def populate_output(A : Node | None , A : int ) -> None:
if not root:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.left , level - 1 )
populate_output(root.right , level - 1 )
populate_output(A , A )
return output
def UpperCAmelCase ( A : Node | None , A : int ):
'''simple docstring'''
_UpperCAmelCase = []
def populate_output(A : Node | None , A : int ) -> None:
if root is None:
return
if level == 1:
output.append(root.data )
elif level > 1:
populate_output(root.right , level - 1 )
populate_output(root.left , level - 1 )
populate_output(A , A )
return output
def UpperCAmelCase ( A : Node | None ):
'''simple docstring'''
if root is None:
return []
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = height(A )
for h in range(1 , height_tree + 1 ):
if not flag:
output.append(get_nodes_from_left_to_right(A , A ) )
_UpperCAmelCase = 1
else:
output.append(get_nodes_from_right_to_left(A , A ) )
_UpperCAmelCase = 0
return output
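# For the sample tree above (1; 2, 3; 4, 5), zigzag() yields [[1], [3, 2], [4, 5]]:
# odd levels are read left-to-right and even levels right-to-left.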
def UpperCAmelCase ( ): # Main function for testing.
'''simple docstring'''
_UpperCAmelCase = make_tree()
print(f'In-order Traversal: {inorder(A )}' )
print(f'Pre-order Traversal: {preorder(A )}' )
print(f'Post-order Traversal: {postorder(A )}' , '\n' )
print(f'Height of Tree: {height(A )}' , '\n' )
print('Complete Level Order Traversal: ' )
print(level_order(A ) , '\n' )
print('Level-wise order Traversal: ' )
for level in range(1 , height(A ) + 1 ):
print(f'Level {level}:' , get_nodes_from_left_to_right(A , level=A ) )
print('\nZigZag order Traversal: ' )
print(zigzag(A ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
main()
| 24 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
lowercase = logging.get_logger(__name__)
lowercase = TypeVar('''DatasetType''', Dataset, IterableDataset)
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[List[float]] = None , A : Optional[int] = None , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : Literal["first_exhausted", "all_exhausted"] = "first_exhausted" , ):
'''simple docstring'''
from .arrow_dataset import Dataset
from .iterable_dataset import IterableDataset
if not datasets:
raise ValueError('Unable to interleave an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
raise ValueError(f'{stopping_strategy} is not supported. Please enter a valid stopping_strategy.' )
if dataset_type is Dataset:
return _interleave_map_style_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
else:
return _interleave_iterable_datasets(
A , A , A , info=A , split=A , stopping_strategy=A )
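# Usage sketch (assuming this function is exposed as `interleave_datasets`):
#   from datasets import Dataset, interleave_datasets
#   d1 = Dataset.from_dict({'a': [0, 1, 2]} )
#   d2 = Dataset.from_dict({'a': [10, 11, 12]} )
#   mixed = interleave_datasets(
#       [d1, d2] , probabilities=[0.5, 0.5] , seed=42 , stopping_strategy='all_exhausted'
#   )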
def UpperCAmelCase ( A : List[DatasetType] , A : Optional[DatasetInfo] = None , A : Optional[NamedSplit] = None , A : int = 0 , ):
'''simple docstring'''
if not dsets:
raise ValueError('Unable to concatenate an empty list of datasets.' )
for i, dataset in enumerate(A ):
if not isinstance(A , (Dataset, IterableDataset) ):
if isinstance(A , (DatasetDict, IterableDatasetDict) ):
if not dataset:
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} '
'is an empty dataset dictionary.' )
raise ValueError(
f'Dataset at position {i} has at least one split: {list(A )}\n'
f'Please pick one to interleave with the other datasets, for example: dataset[\'{next(iter(A ) )}\']' )
raise ValueError(
f'Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(A ).__name__}.' )
if i == 0:
_UpperCAmelCase , _UpperCAmelCase = (
(Dataset, IterableDataset) if isinstance(A , A ) else (IterableDataset, Dataset)
)
elif not isinstance(A , A ):
raise ValueError(
f'Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects.' )
if dataset_type is Dataset:
return _concatenate_map_style_datasets(A , info=A , split=A , axis=A )
else:
return _concatenate_iterable_datasets(A , info=A , split=A , axis=A )
| 24 | 1 |
"""simple docstring"""
import importlib
import os
import sys
# This is required to make the module import works (when the python process is running from the root of the repo)
sys.path.append('''.''')
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = test_file.split(os.path.sep )
if components[0:2] != ["tests", "models"]:
raise ValueError(
'`test_file` should start with `tests/models/` (with `/` being the OS specific path separator). Got '
f'{test_file} instead.' )
_UpperCAmelCase = components[-1]
if not test_fn.endswith('py' ):
raise ValueError(f'`test_file` should be a python file. Got {test_fn} instead.' )
if not test_fn.startswith('test_modeling_' ):
raise ValueError(
f'`test_file` should point to a file name of the form `test_modeling_*.py`. Got {test_fn} instead.' )
_UpperCAmelCase = components[:-1] + [test_fn.replace('.py' , '' )]
_UpperCAmelCase = '.'.join(A )
return test_module_path
def UpperCAmelCase ( A : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = get_module_path(A )
_UpperCAmelCase = importlib.import_module(A )
return test_module
def UpperCAmelCase ( A : Dict ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = get_test_module(A )
for attr in dir(A ):
if attr.endswith('ModelTester' ):
tester_classes.append(getattr(A , A ) )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def UpperCAmelCase ( A : Optional[int] ):
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = get_test_module(A )
for attr in dir(A ):
_UpperCAmelCase = getattr(A , A )
# (TF/Flax)ModelTesterMixin is also an attribute in specific model test module. Let's exclude them by checking
# `all_model_classes` is not empty (which also excludes other special classes).
_UpperCAmelCase = getattr(A , 'all_model_classes' , [] )
if len(A ) > 0:
test_classes.append(A )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = get_test_classes(A )
_UpperCAmelCase = set()
for test_class in test_classes:
model_classes.update(test_class.all_model_classes )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def UpperCAmelCase ( A : Dict ):
'''simple docstring'''
_UpperCAmelCase = test_class()
if hasattr(A , 'setUp' ):
test.setUp()
_UpperCAmelCase = None
if hasattr(A , 'model_tester' ):
# `(TF/Flax)ModelTesterMixin` has this attribute default to `None`. Let's skip this case.
if test.model_tester is not None:
_UpperCAmelCase = test.model_tester.__class__
return model_tester
def UpperCAmelCase ( A : Any , A : int ):
'''simple docstring'''
_UpperCAmelCase = get_test_classes(A )
_UpperCAmelCase = []
for test_class in test_classes:
if model_class in test_class.all_model_classes:
target_test_classes.append(A )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def UpperCAmelCase ( A : str , A : Dict ):
'''simple docstring'''
_UpperCAmelCase = get_test_classes_for_model(A , A )
_UpperCAmelCase = []
for test_class in test_classes:
_UpperCAmelCase = get_model_tester_from_test_class(A )
if tester_class is not None:
tester_classes.append(A )
# sort with class names
    return sorted(A , key=lambda x : x.__name__ )
def UpperCAmelCase ( A : Optional[Any] ):
'''simple docstring'''
_UpperCAmelCase = get_test_classes(A )
_UpperCAmelCase = {test_class: get_model_tester_from_test_class(A ) for test_class in test_classes}
return test_tester_mapping
def UpperCAmelCase ( A : Any ):
'''simple docstring'''
_UpperCAmelCase = get_model_classes(A )
_UpperCAmelCase = {
model_class: get_test_classes_for_model(A , A ) for model_class in model_classes
}
return model_test_mapping
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = get_model_classes(A )
_UpperCAmelCase = {
model_class: get_tester_classes_for_model(A , A ) for model_class in model_classes
}
return model_to_tester_mapping
def UpperCAmelCase ( A : List[Any] ):
'''simple docstring'''
    if isinstance(A , str ):
        return o
    elif isinstance(A , type ):
        return o.__name__
    elif isinstance(A , (list, tuple) ):
        return [to_json(x ) for x in o]
    elif isinstance(A , dict ):
        return {to_json(k ): to_json(v ) for k, v in o.items()}
else:
return o
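# Example (hypothetical class names): to_json recursively maps classes to their
# names, so to_json({BertModelTest: [BertModelTester]} ) returns
# {'BertModelTest': ['BertModelTester']}.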
| 24 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = generator('Something there' )
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
_UpperCAmelCase = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=7 , snake_case=True , snake_case=True , snake_case=True , snake_case=True , snake_case=99 , snake_case=32 , snake_case=2 , snake_case=4 , snake_case=37 , snake_case="gelu" , snake_case=0.1 , snake_case=0.1 , snake_case=512 , snake_case=16 , snake_case=2 , snake_case=0.02 , snake_case=3 , snake_case=4 , snake_case=None , ) -> Dict:
_UpperCAmelCase = parent
_UpperCAmelCase = 13
_UpperCAmelCase = 7
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = 99
_UpperCAmelCase = 384
_UpperCAmelCase = 2
_UpperCAmelCase = 4
_UpperCAmelCase = 37
_UpperCAmelCase = 'gelu'
_UpperCAmelCase = 0.1
_UpperCAmelCase = 0.1
_UpperCAmelCase = 512
_UpperCAmelCase = 16
_UpperCAmelCase = 2
_UpperCAmelCase = 0.02
_UpperCAmelCase = 3
_UpperCAmelCase = 4
_UpperCAmelCase = 128
_UpperCAmelCase = 2
_UpperCAmelCase = 9
_UpperCAmelCase = 1
_UpperCAmelCase = None
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=snake_case , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Any:
_UpperCAmelCase = TFConvBertModel(config=snake_case )
_UpperCAmelCase = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
_UpperCAmelCase = [input_ids, input_mask]
_UpperCAmelCase = model(snake_case )
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[str]:
_UpperCAmelCase = TFConvBertForMaskedLM(config=snake_case )
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> str:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFConvBertForSequenceClassification(config=snake_case )
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> int:
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = TFConvBertForMultipleChoice(config=snake_case )
_UpperCAmelCase = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = tf.tile(tf.expand_dims(snake_case , 1 ) , (1, self.num_choices, 1) )
_UpperCAmelCase = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> List[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = TFConvBertForTokenClassification(config=snake_case )
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = TFConvBertForQuestionAnswering(config=snake_case )
_UpperCAmelCase = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
_UpperCAmelCase = model(snake_case )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.prepare_config_and_inputs()
        (
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
            _UpperCAmelCase ,
        ) = config_and_inputs
_UpperCAmelCase = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
_UpperCAmelCase = (
{
'''feature-extraction''': TFConvBertModel,
'''fill-mask''': TFConvBertForMaskedLM,
'''question-answering''': TFConvBertForQuestionAnswering,
'''text-classification''': TFConvBertForSequenceClassification,
'''token-classification''': TFConvBertForTokenClassification,
'''zero-shot''': TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = TFConvBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> int:
self.config_tester.run_common_tests()
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*snake_case )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*snake_case )
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*snake_case )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*snake_case )
@slow
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
_UpperCAmelCase = True
if hasattr(snake_case , 'use_cache' ):
_UpperCAmelCase = True
_UpperCAmelCase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_UpperCAmelCase = getattr(self.model_tester , 'key_length' , snake_case )
for model_class in self.all_model_classes:
_UpperCAmelCase = self._prepare_for_class(snake_case , snake_case )
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = len(model(snake_case ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(snake_case , saved_model=snake_case )
_UpperCAmelCase = os.path.join(snake_case , 'saved_model' , '1' )
_UpperCAmelCase = tf.keras.models.load_model(snake_case )
_UpperCAmelCase = model(snake_case )
if self.is_encoder_decoder:
_UpperCAmelCase = outputs['encoder_hidden_states']
_UpperCAmelCase = outputs['encoder_attentions']
else:
_UpperCAmelCase = outputs['hidden_states']
_UpperCAmelCase = outputs['attentions']
self.assertEqual(len(snake_case ) , snake_case )
_UpperCAmelCase = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(snake_case ) , snake_case )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
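        # ConvBERT routes half of the heads to its span-based convolution branch
        # (head_ratio defaults to 2), hence the `/ 2` in the shape check below.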
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(snake_case )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
_UpperCAmelCase = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
_UpperCAmelCase = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
_UpperCAmelCase = getattr(self.model_tester , 'key_length' , snake_case )
_UpperCAmelCase = getattr(self.model_tester , 'key_length' , snake_case )
def check_decoder_attentions_output(snake_case ):
_UpperCAmelCase = len(snake_case )
self.assertEqual(out_len % 2 , 0 )
_UpperCAmelCase = outputs.decoder_attentions
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(snake_case ):
_UpperCAmelCase = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(snake_case ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = model(self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = len(snake_case )
self.assertEqual(config.output_hidden_states , snake_case )
check_encoder_attentions_output(snake_case )
if self.is_encoder_decoder:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = model(self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(config.output_hidden_states , snake_case )
check_decoder_attentions_output(snake_case )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = model(self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(config.output_hidden_states , snake_case )
check_encoder_attentions_output(snake_case )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = model(self._prepare_for_class(snake_case , snake_case ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(snake_case ) )
self.assertEqual(model.config.output_hidden_states , snake_case )
check_encoder_attentions_output(snake_case )
@require_tf
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@slow
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
_UpperCAmelCase = tf.constant([[0, 1, 2, 3, 4, 5]] )
_UpperCAmelCase = model(snake_case )[0]
_UpperCAmelCase = [1, 6, 768]
self.assertEqual(output.shape , snake_case )
_UpperCAmelCase = tf.constant(
[
[
[-0.03475493, -0.4686034, -0.30638832],
[0.22637248, -0.26988646, -0.7423424],
[0.10324868, -0.45013508, -0.58280784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , snake_case , atol=1E-4 )
| 24 |
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
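    # memo[n][k] counts the partitions of n into parts of size at most k + 1:
    # memo[n][k - 1] covers partitions that use no part of size k + 1, and
    # memo[n - k - 1][k] covers those that use at least one such part.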
    memo = [[0 for _ in range(A )] for _ in range(A + 1 )]
    for i in range(A + 1 ):
        memo[i][0] = 1
    for n in range(A + 1 ):
        for k in range(1 , A ):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[A][A - 1]
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
lowercase = int(input('''Enter a number: ''').strip())
print(partition(n))
except ValueError:
print('''Please enter a number.''')
else:
try:
lowercase = int(sys.argv[1])
print(partition(n))
except ValueError:
print('''Please pass a number.''')
| 24 | 1 |
"""simple docstring"""
def UpperCAmelCase ( A : float , A : float ):
'''simple docstring'''
if mass < 0:
raise ValueError('The mass of a body cannot be negative' )
return 0.5 * mass * abs(A ) * abs(A )
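# Worked example (assuming the original name `kinetic_energy` for the function
# above): kinetic_energy(10 , 10 ) -> 0.5 * 10 * abs(10 ) * abs(10 ) = 500.0 J.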
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
| 24 |
"""simple docstring"""
import os
lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def UpperCAmelCase ( A : str ):
'''simple docstring'''
    total_value = 0
    index = 0
while index < len(A ) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
if current_value < next_value:
total_value -= current_value
else:
total_value += current_value
index += 1
total_value += SYMBOLS[numerals[index]]
return total_value
def UpperCAmelCase ( A : int ):
'''simple docstring'''
    numerals = ''
    m_count = num // 1000
numerals += m_count * "M"
num %= 1000
    c_count = num // 100
if c_count == 9:
numerals += "CM"
c_count -= 9
elif c_count == 4:
numerals += "CD"
c_count -= 4
if c_count >= 5:
numerals += "D"
c_count -= 5
numerals += c_count * "C"
num %= 100
    x_count = num // 10
if x_count == 9:
numerals += "XC"
x_count -= 9
elif x_count == 4:
numerals += "XL"
x_count -= 4
if x_count >= 5:
numerals += "L"
x_count -= 5
numerals += x_count * "X"
num %= 10
if num == 9:
numerals += "IX"
num -= 9
elif num == 4:
numerals += "IV"
num -= 4
if num >= 5:
numerals += "V"
num -= 5
numerals += num * "I"
return numerals
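# Example round trip: generate_roman_numerals(1990 ) returns 'MCMXC' (the
# minimal form), and parse_roman_numerals('MCMXC' ) returns 1990.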
def UpperCAmelCase ( A : str = "/p089_roman.txt" ):
'''simple docstring'''
    savings = 0
    with open(os.path.dirname(__file__ ) + roman_numerals_filename ) as filea:
        lines = filea.readlines()
    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original )
        shorter = generate_roman_numerals(num )
        savings += len(original ) - len(shorter )
return savings
if __name__ == "__main__":
print(F'''{solution() = }''')
| 24 | 1 |
"""simple docstring"""
class lowercase__ :
'''simple docstring'''
def __init__( self ) -> Dict:
_UpperCAmelCase = {}
def lowerCamelCase_ ( self ) -> None:
print(self.vertex )
for i in self.vertex:
            print(i , ' -> ' , ' -> '.join([str(j ) for j in self.vertex[i]] ) )
def lowerCamelCase_ ( self , snake_case , snake_case ) -> None:
# check if vertex is already present,
if from_vertex in self.vertex:
self.vertex[from_vertex].append(snake_case )
else:
# else make a new vertex
            self.vertex[from_vertex] = [to_vertex]
def lowerCamelCase_ ( self ) -> None:
# visited array for storing already visited nodes
        visited = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
                self.dfs_recursive(i , visited )
def lowerCamelCase_ ( self , snake_case , snake_case ) -> None:
# mark start vertex as visited
        visited[start_vertex] = True
        print(start_vertex , end=' ' )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
                self.dfs_recursive(i , visited )
if __name__ == "__main__":
lowercase = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print('''DFS:''')
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
| 24 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = {
'task_specific_params': {
'summarization': {'length_penalty': 1.0, 'max_length': 128, 'min_length': 12, 'num_beams': 4},
'summarization_cnn': {'length_penalty': 2.0, 'max_length': 142, 'min_length': 56, 'num_beams': 4},
'summarization_xsum': {'length_penalty': 1.0, 'max_length': 62, 'min_length': 11, 'num_beams': 6},
}
}
_UpperCAmelCase = {
'task_specific_params.summarization.length_penalty': 1.0,
'task_specific_params.summarization.max_length': 128,
'task_specific_params.summarization.min_length': 12,
'task_specific_params.summarization.num_beams': 4,
'task_specific_params.summarization_cnn.length_penalty': 2.0,
'task_specific_params.summarization_cnn.max_length': 142,
'task_specific_params.summarization_cnn.min_length': 56,
'task_specific_params.summarization_cnn.num_beams': 4,
'task_specific_params.summarization_xsum.length_penalty': 1.0,
'task_specific_params.summarization_xsum.max_length': 62,
'task_specific_params.summarization_xsum.min_length': 11,
'task_specific_params.summarization_xsum.num_beams': 6,
}
self.assertEqual(flatten_dict(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(transpose(snake_case ) , x.transpose() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , x.transpose((1, 2, 0) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , transpose(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , transpose(snake_case , axes=(1, 2, 0) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case ) , np.asarray(transpose(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(transpose(snake_case , axes=(1, 2, 0) ) , np.asarray(transpose(snake_case , axes=(1, 2, 0) ) ) ) )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.reshape(snake_case , (4, 3) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.reshape(snake_case , (12, 5) ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Optional[Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , reshape(snake_case , (4, 3) ).numpy() ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , reshape(snake_case , (12, 5) ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (4, 3) ) , np.asarray(reshape(snake_case , (4, 3) ) ) ) )
_UpperCAmelCase = np.random.randn(3 , 4 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(reshape(snake_case , (12, 5) ) , np.asarray(reshape(snake_case , (12, 5) ) ) ) )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.squeeze(snake_case ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.squeeze(snake_case , axis=2 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , squeeze(snake_case ).numpy() ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , squeeze(snake_case , axis=2 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(1 , 3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case ) , np.asarray(squeeze(snake_case ) ) ) )
_UpperCAmelCase = np.random.randn(1 , 4 , 1 , 5 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(squeeze(snake_case , axis=2 ) , np.asarray(squeeze(snake_case , axis=2 ) ) ) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = np.random.randn(3 , 4 )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.expand_dims(snake_case , axis=1 ) ) )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = torch.tensor(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_tf
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = tf.constant(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , expand_dims(snake_case , axis=1 ).numpy() ) )
@require_flax
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = np.random.randn(3 , 4 )
_UpperCAmelCase = jnp.array(snake_case )
self.assertTrue(np.allclose(expand_dims(snake_case , axis=1 ) , np.asarray(expand_dims(snake_case , axis=1 ) ) ) )
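# These transformers.utils helpers are framework-agnostic: transpose, reshape,
# squeeze and expand_dims dispatch on the input type, so e.g.
#   transpose(np.ones((3, 4) ) )     # numpy array of shape (4, 3)
#   transpose(torch.ones(3 , 4 ) )   # torch tensor of shape (4, 3)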
| 24 | 1 |
"""simple docstring"""
from __future__ import annotations
from numpy import array, cos, cross, floataa, radians, sin
from numpy.typing import NDArray
def UpperCAmelCase ( A : float , A : float , A : bool = False ):
'''simple docstring'''
if radian_mode:
return [magnitude * cos(A ), magnitude * sin(A )]
return [magnitude * cos(radians(A ) ), magnitude * sin(radians(A ) )]
def UpperCAmelCase ( A : NDArray[floataa] , A : NDArray[floataa] , A : float = 10**-1 ):
'''simple docstring'''
    moments = cross(location , forces )
    sum_moments = sum(moments )
    return abs(sum_moments ) < eps
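# in_static_equilibrium sums the scalar 2-D moments (cross products of each
# force's application point with the force itself) and reports equilibrium
# when the total magnitude falls below eps.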
if __name__ == "__main__":
# Test to check if it works
lowercase = array(
[
polar_force(718.4, 1_80 - 30),
polar_force(879.54, 45),
polar_force(1_00, -90),
]
)
lowercase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem 1 in image_data/2D_problems.jpg
lowercase = array(
[
polar_force(30 * 9.81, 15),
polar_force(2_15, 1_80 - 45),
polar_force(2_64, 90 - 30),
]
)
lowercase = array([[0, 0], [0, 0], [0, 0]])
assert in_static_equilibrium(forces, location)
# Problem in image_data/2D_problems_1.jpg
lowercase = array([[0, -20_00], [0, -12_00], [0, 1_56_00], [0, -1_24_00]])
lowercase = array([[0, 0], [6, 0], [10, 0], [12, 0]])
assert in_static_equilibrium(forces, location)
import doctest
doctest.testmod()
| 24 |
"""simple docstring"""
import os
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = os.path.join(os.path.dirname(A ) , 'num.txt' )
with open(A ) as file_hand:
return str(sum(int(A ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution())
| 24 | 1 |
"""simple docstring"""
import math
import time
from typing import Dict, List, Optional
from torch.utils.data import Dataset
from transformers import SeqaSeqTrainer, is_torch_tpu_available
from transformers.trainer_utils import PredictionOutput, speed_metrics
if is_torch_tpu_available(check_device=False):
import torch_xla.core.xla_model as xm
import torch_xla.debug.metrics as met
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , *snake_case , snake_case=None , snake_case=None , **snake_case ) -> Optional[Any]:
super().__init__(*snake_case , **snake_case )
_UpperCAmelCase = eval_examples
_UpperCAmelCase = post_process_function
def lowerCamelCase_ ( self , snake_case = None , snake_case=None , snake_case = None , snake_case = "eval" , **snake_case , ) -> Dict[str, float]:
_UpperCAmelCase = gen_kwargs.copy()
_UpperCAmelCase = (
gen_kwargs['max_length'] if gen_kwargs.get('max_length' ) is not None else self.args.generation_max_length
)
_UpperCAmelCase = (
gen_kwargs['num_beams'] if gen_kwargs.get('num_beams' ) is not None else self.args.generation_num_beams
)
_UpperCAmelCase = gen_kwargs
_UpperCAmelCase = self.eval_dataset if eval_dataset is None else eval_dataset
_UpperCAmelCase = self.get_eval_dataloader(snake_case )
_UpperCAmelCase = self.eval_examples if eval_examples is None else eval_examples
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCAmelCase = self.compute_metrics
_UpperCAmelCase = None
_UpperCAmelCase = time.time()
_UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_UpperCAmelCase = eval_loop(
snake_case , description='Evaluation' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case , metric_key_prefix=snake_case , )
finally:
_UpperCAmelCase = compute_metrics
_UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
snake_case , snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is not None and self.compute_metrics is not None and self.args.should_save:
# Only the main node write the results by default
_UpperCAmelCase = self.post_process_function(snake_case , snake_case , snake_case )
_UpperCAmelCase = self.compute_metrics(snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
_UpperCAmelCase = metrics.pop(snake_case )
metrics.update(output.metrics )
else:
_UpperCAmelCase = output.metrics
if self.args.should_log:
# Only the main node log the results by default
self.log(snake_case )
if self.args.tpu_metrics_debug or self.args.debug:
# tpu-comment: Logging debug metrics for PyTorch/XLA (compile, execute times, ops, etc.)
xm.master_print(met.metrics_report() )
_UpperCAmelCase = self.callback_handler.on_evaluate(self.args , self.state , self.control , snake_case )
return metrics
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case = "test" , **snake_case ) -> Optional[int]:
_UpperCAmelCase = gen_kwargs.copy()
_UpperCAmelCase = self.get_test_dataloader(snake_case )
# Temporarily disable metric computation, we will do it in the loop here.
_UpperCAmelCase = self.compute_metrics
_UpperCAmelCase = None
_UpperCAmelCase = time.time()
_UpperCAmelCase = self.prediction_loop if self.args.use_legacy_prediction_loop else self.evaluation_loop
try:
_UpperCAmelCase = eval_loop(
snake_case , description='Prediction' , prediction_loss_only=True if compute_metrics is None else None , ignore_keys=snake_case , metric_key_prefix=snake_case , )
finally:
_UpperCAmelCase = compute_metrics
_UpperCAmelCase = self.args.eval_batch_size * self.args.world_size
if f'{metric_key_prefix}_jit_compilation_time' in output.metrics:
start_time += output.metrics[f'{metric_key_prefix}_jit_compilation_time']
output.metrics.update(
speed_metrics(
snake_case , snake_case , num_samples=output.num_samples , num_steps=math.ceil(output.num_samples / total_batch_size ) , ) )
if self.post_process_function is None or self.compute_metrics is None:
return output
_UpperCAmelCase = self.post_process_function(snake_case , snake_case , snake_case , 'predict' )
_UpperCAmelCase = self.compute_metrics(snake_case )
# Prefix all keys with metric_key_prefix + '_'
for key in list(metrics.keys() ):
if not key.startswith(f'{metric_key_prefix}_' ):
_UpperCAmelCase = metrics.pop(snake_case )
metrics.update(output.metrics )
return PredictionOutput(predictions=predictions.predictions , label_ids=predictions.label_ids , metrics=snake_case )
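# Usage sketch (illustrative names, not from the source): generation kwargs
# pass straight through to evaluate/predict, e.g.
#   metrics = trainer.evaluate(max_length=128 , num_beams=4 )
#   preds = trainer.predict(test_dataset , test_examples , max_length=128 )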
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowercase = {
'''configuration_roberta''': ['''ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''RobertaConfig''', '''RobertaOnnxConfig'''],
'''tokenization_roberta''': ['''RobertaTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''RobertaTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''RobertaForCausalLM''',
'''RobertaForMaskedLM''',
'''RobertaForMultipleChoice''',
'''RobertaForQuestionAnswering''',
'''RobertaForSequenceClassification''',
'''RobertaForTokenClassification''',
'''RobertaModel''',
'''RobertaPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFRobertaForCausalLM''',
'''TFRobertaForMaskedLM''',
'''TFRobertaForMultipleChoice''',
'''TFRobertaForQuestionAnswering''',
'''TFRobertaForSequenceClassification''',
'''TFRobertaForTokenClassification''',
'''TFRobertaMainLayer''',
'''TFRobertaModel''',
'''TFRobertaPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''FlaxRobertaForCausalLM''',
'''FlaxRobertaForMaskedLM''',
'''FlaxRobertaForMultipleChoice''',
'''FlaxRobertaForQuestionAnswering''',
'''FlaxRobertaForSequenceClassification''',
'''FlaxRobertaForTokenClassification''',
'''FlaxRobertaModel''',
'''FlaxRobertaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_roberta import ROBERTA_PRETRAINED_CONFIG_ARCHIVE_MAP, RobertaConfig, RobertaOnnxConfig
from .tokenization_roberta import RobertaTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_roberta_fast import RobertaTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta import (
ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaForCausalLM,
RobertaForMaskedLM,
RobertaForMultipleChoice,
RobertaForQuestionAnswering,
RobertaForSequenceClassification,
RobertaForTokenClassification,
RobertaModel,
RobertaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta import (
TF_ROBERTA_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaForCausalLM,
TFRobertaForMaskedLM,
TFRobertaForMultipleChoice,
TFRobertaForQuestionAnswering,
TFRobertaForSequenceClassification,
TFRobertaForTokenClassification,
TFRobertaMainLayer,
TFRobertaModel,
TFRobertaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta import (
FlaxRobertaForCausalLM,
FlaxRobertaForMaskedLM,
FlaxRobertaForMultipleChoice,
FlaxRobertaForQuestionAnswering,
FlaxRobertaForSequenceClassification,
FlaxRobertaForTokenClassification,
FlaxRobertaModel,
FlaxRobertaPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
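# With this lazy-module setup, `from transformers.models.roberta import RobertaModel`
# defers the heavy torch-backed import until the attribute is first accessed.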
| 24 | 1 |
"""simple docstring"""
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
lowercase = logging.get_logger(__name__)
lowercase = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
lowercase = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowercase = {
'''allenai/led-base-16384''': 1_63_84,
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = VOCAB_FILES_NAMES
_UpperCAmelCase = PRETRAINED_VOCAB_FILES_MAP
_UpperCAmelCase = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_UpperCAmelCase = LEDTokenizer
_UpperCAmelCase = ['''input_ids''', '''attention_mask''']
def __init__( self , snake_case=None , snake_case=None , snake_case=None , snake_case="replace" , snake_case="<s>" , snake_case="</s>" , snake_case="</s>" , snake_case="<s>" , snake_case="<unk>" , snake_case="<pad>" , snake_case="<mask>" , snake_case=False , snake_case=True , **snake_case , ) -> Optional[int]:
super().__init__(
snake_case , snake_case , tokenizer_file=snake_case , errors=snake_case , bos_token=snake_case , eos_token=snake_case , sep_token=snake_case , cls_token=snake_case , unk_token=snake_case , pad_token=snake_case , mask_token=snake_case , add_prefix_space=snake_case , trim_offsets=snake_case , **snake_case , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get('add_prefix_space' , snake_case ) != add_prefix_space:
_UpperCAmelCase = getattr(snake_case , pre_tok_state.pop('type' ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**snake_case )
_UpperCAmelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_UpperCAmelCase = 'post_processor'
_UpperCAmelCase = getattr(self.backend_tokenizer , snake_case , snake_case )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state['sep'] )
if "cls" in state:
_UpperCAmelCase = tuple(state['cls'] )
_UpperCAmelCase = False
if state.get('add_prefix_space' , snake_case ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get('trim_offsets' , snake_case ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(snake_case , state.pop('type' ) )
_UpperCAmelCase = component_class(**snake_case )
setattr(self.backend_tokenizer , snake_case , snake_case )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def lowerCamelCase_ ( self ) -> str:
if self._mask_token is None:
if self.verbose:
logger.error('Using mask_token, but it is not set yet.' )
return None
return str(self._mask_token )
@mask_token.setter
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = AddedToken(snake_case , lstrip=snake_case , rstrip=snake_case ) if isinstance(snake_case , snake_case ) else value
_UpperCAmelCase = value
def lowerCamelCase_ ( self , *snake_case , **snake_case ) -> BatchEncoding:
_UpperCAmelCase = kwargs.get('is_split_into_words' , snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._batch_encode_plus(*snake_case , **snake_case )
def lowerCamelCase_ ( self , *snake_case , **snake_case ) -> BatchEncoding:
_UpperCAmelCase = kwargs.get('is_split_into_words' , snake_case )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f'You need to instantiate {self.__class__.__name__} with add_prefix_space=True '
'to use it with pretokenized inputs.' )
return super()._encode_plus(*snake_case , **snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> Tuple[str]:
_UpperCAmelCase = self._tokenizer.model.save(snake_case , name=snake_case )
return tuple(snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case=None ) -> List[Any]:
_UpperCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def lowerCamelCase_ ( self , snake_case , snake_case = None ) -> List[int]:
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def lowerCamelCase_ ( self , snake_case , snake_case = None , snake_case = PaddingStrategy.DO_NOT_PAD , snake_case = None , snake_case = None , ) -> dict:
_UpperCAmelCase = super()._pad(
encoded_inputs=snake_case , max_length=snake_case , padding_strategy=snake_case , pad_to_multiple_of=snake_case , return_attention_mask=snake_case , )
# Load from model defaults
if return_attention_mask is None:
_UpperCAmelCase = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCAmelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_UpperCAmelCase = len(encoded_inputs['global_attention_mask'] ) != len(snake_case )
if needs_to_be_padded:
_UpperCAmelCase = len(snake_case ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCAmelCase = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCAmelCase = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs
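# Usage sketch (assuming the original class name `LEDTokenizerFast`):
#   tok = LEDTokenizerFast.from_pretrained('allenai/led-base-16384' )
#   enc = tok(['short', 'a much longer input'] , padding=True )
# A caller-supplied `global_attention_mask` is padded with -1 alongside the
# other inputs, so padded positions are never flagged for global attention.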
| 24 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_yolos import YolosImageProcessor
lowercase = logging.get_logger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
def __init__( self , *snake_case , **snake_case ) -> None:
warnings.warn(
'The class YolosFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please'
' use YolosImageProcessor instead.' , snake_case , )
super().__init__(*snake_case , **snake_case )
| 24 | 1 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
lowercase = logging.getLogger()
def _dump_articles( path : Path , articles : list ):
    '''simple docstring'''
    content = '\n'.join(articles )
    Path(path ).open('w' ).writelines(content )
lowercase = '''patrickvonplaten/t5-tiny-random'''
lowercase = '''sshleifer/bart-tiny-random'''
lowercase = '''sshleifer/tiny-mbart'''
lowercase = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL) # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase_ ( self ) -> str:
self.run_eval_tester(snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
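# The tests above drive command-line scripts by temporarily swapping out `sys.argv`.
# A standalone sketch of that technique; `_fake_main` is a stand-in for a real entry
# point such as `run_generate`.
import sys
from unittest.mock import patch

def _fake_main():
    return sys.argv[1:]  # argparse-style tools read their arguments from here

with patch.object(sys, 'argv', ['prog', '--num_beams', '2']):
    assert _fake_main() == ['--num_beams', '2']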
| 24 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = '''beit'''
def __init__( self , snake_case=8192 , snake_case=768 , snake_case=12 , snake_case=12 , snake_case=3072 , snake_case="gelu" , snake_case=0.0 , snake_case=0.0 , snake_case=0.02 , snake_case=1E-12 , snake_case=224 , snake_case=16 , snake_case=3 , snake_case=False , snake_case=False , snake_case=False , snake_case=False , snake_case=0.1 , snake_case=0.1 , snake_case=True , snake_case=[3, 5, 7, 11] , snake_case=[1, 2, 3, 6] , snake_case=True , snake_case=0.4 , snake_case=256 , snake_case=1 , snake_case=False , snake_case=255 , **snake_case , ) -> str:
super().__init__(**snake_case )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = use_mask_token
_UpperCAmelCase = use_absolute_position_embeddings
_UpperCAmelCase = use_relative_position_bias
_UpperCAmelCase = use_shared_relative_position_bias
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = use_mean_pooling
# decode head attributes (semantic segmentation)
_UpperCAmelCase = out_indices
_UpperCAmelCase = pool_scales
# auxiliary head attributes (semantic segmentation)
_UpperCAmelCase = use_auxiliary_head
_UpperCAmelCase = auxiliary_loss_weight
_UpperCAmelCase = auxiliary_channels
_UpperCAmelCase = auxiliary_num_convs
_UpperCAmelCase = auxiliary_concat_input
_UpperCAmelCase = semantic_loss_ignore_index
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
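# A sketch of how a dynamic-axes mapping like the one above is typically consumed
# by `torch.onnx.export` (assuming torch is installed; the tiny conv model and the
# axes chosen here are purely illustrative):
import io
import torch

_model = torch.nn.Conv2d(3, 8, kernel_size=3)
_dummy = torch.randn(1, 3, 224, 224)
_dynamic_axes = {'pixel_values': {0: 'batch', 2: 'height', 3: 'width'}}
_buffer = io.BytesIO()
torch.onnx.export(
    _model,
    _dummy,
    _buffer,
    input_names=['pixel_values'],
    output_names=['last_hidden_state'],
    dynamic_axes=_dynamic_axes,
)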
| 24 | 1 |
"""simple docstring"""
import gc
import random
import unittest
import numpy as np
import torch
from transformers import (
CLIPImageProcessor,
CLIPTextConfig,
CLIPTextModelWithProjection,
CLIPTokenizer,
CLIPVisionConfig,
CLIPVisionModelWithProjection,
)
from diffusers import (
DiffusionPipeline,
UnCLIPImageVariationPipeline,
UnCLIPScheduler,
UNetaDConditionModel,
UNetaDModel,
)
from diffusers.pipelines.unclip.text_proj import UnCLIPTextProjModel
from diffusers.utils import floats_tensor, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, load_image, require_torch_gpu, skip_mps
from ..pipeline_params import IMAGE_VARIATION_BATCH_PARAMS, IMAGE_VARIATION_PARAMS
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = UnCLIPImageVariationPipeline
_UpperCAmelCase = IMAGE_VARIATION_PARAMS - {'''height''', '''width''', '''guidance_scale'''}
_UpperCAmelCase = IMAGE_VARIATION_BATCH_PARAMS
_UpperCAmelCase = [
'''generator''',
'''return_dict''',
'''decoder_num_inference_steps''',
'''super_res_num_inference_steps''',
]
_UpperCAmelCase = False
@property
def lowerCamelCase_ ( self ) -> List[Any]:
return 32
@property
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return 32
@property
def lowerCamelCase_ ( self ) -> int:
return self.time_input_dim
@property
def lowerCamelCase_ ( self ) -> Any:
return self.time_input_dim * 4
@property
def lowerCamelCase_ ( self ) -> Tuple:
return 100
@property
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
return tokenizer
@property
def lowerCamelCase_ ( self ) -> Any:
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
return CLIPTextModelWithProjection(snake_case )
@property
def lowerCamelCase_ ( self ) -> int:
torch.manual_seed(0 )
_UpperCAmelCase = CLIPVisionConfig(
hidden_size=self.text_embedder_hidden_size , projection_dim=self.text_embedder_hidden_size , num_hidden_layers=5 , num_attention_heads=4 , image_size=32 , intermediate_size=37 , patch_size=1 , )
return CLIPVisionModelWithProjection(snake_case )
@property
def lowerCamelCase_ ( self ) -> Union[str, Any]:
torch.manual_seed(0 )
_UpperCAmelCase = {
'clip_embeddings_dim': self.text_embedder_hidden_size,
'time_embed_dim': self.time_embed_dim,
'cross_attention_dim': self.cross_attention_dim,
}
_UpperCAmelCase = UnCLIPTextProjModel(**snake_case )
return model
@property
def lowerCamelCase_ ( self ) -> Tuple:
torch.manual_seed(0 )
_UpperCAmelCase = {
'sample_size': 32,
# RGB in channels
'in_channels': 3,
            # Out channels is double the in channels because the model predicts mean and variance
'out_channels': 6,
'down_block_types': ('ResnetDownsampleBlock2D', 'SimpleCrossAttnDownBlock2D'),
'up_block_types': ('SimpleCrossAttnUpBlock2D', 'ResnetUpsampleBlock2D'),
'mid_block_type': 'UNetMidBlock2DSimpleCrossAttn',
'block_out_channels': (self.block_out_channels_a, self.block_out_channels_a * 2),
'layers_per_block': 1,
'cross_attention_dim': self.cross_attention_dim,
'attention_head_dim': 4,
'resnet_time_scale_shift': 'scale_shift',
'class_embed_type': 'identity',
}
_UpperCAmelCase = UNetaDConditionModel(**snake_case )
return model
@property
def lowerCamelCase_ ( self ) -> Tuple:
return {
"sample_size": 64,
"layers_per_block": 1,
"down_block_types": ("ResnetDownsampleBlock2D", "ResnetDownsampleBlock2D"),
"up_block_types": ("ResnetUpsampleBlock2D", "ResnetUpsampleBlock2D"),
"block_out_channels": (self.block_out_channels_a, self.block_out_channels_a * 2),
"in_channels": 6,
"out_channels": 3,
}
@property
def lowerCamelCase_ ( self ) -> List[str]:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
@property
def lowerCamelCase_ ( self ) -> List[str]:
        # seeded differently to get a different unet than `self.dummy_super_res_first`
torch.manual_seed(1 )
_UpperCAmelCase = UNetaDModel(**self.dummy_super_res_kwargs )
return model
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.dummy_decoder
_UpperCAmelCase = self.dummy_text_proj
_UpperCAmelCase = self.dummy_text_encoder
_UpperCAmelCase = self.dummy_tokenizer
_UpperCAmelCase = self.dummy_super_res_first
_UpperCAmelCase = self.dummy_super_res_last
_UpperCAmelCase = UnCLIPScheduler(
variance_type='learned_range' , prediction_type='epsilon' , num_train_timesteps=1000 , )
_UpperCAmelCase = UnCLIPScheduler(
variance_type='fixed_small_log' , prediction_type='epsilon' , num_train_timesteps=1000 , )
_UpperCAmelCase = CLIPImageProcessor(crop_size=32 , size=32 )
_UpperCAmelCase = self.dummy_image_encoder
return {
"decoder": decoder,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"text_proj": text_proj,
"feature_extractor": feature_extractor,
"image_encoder": image_encoder,
"super_res_first": super_res_first,
"super_res_last": super_res_last,
"decoder_scheduler": decoder_scheduler,
"super_res_scheduler": super_res_scheduler,
}
def lowerCamelCase_ ( self , snake_case , snake_case=0 , snake_case=True ) -> int:
_UpperCAmelCase = floats_tensor((1, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case )
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
if pil_image:
_UpperCAmelCase = input_image * 0.5 + 0.5
_UpperCAmelCase = input_image.clamp(0 , 1 )
_UpperCAmelCase = input_image.cpu().permute(0 , 2 , 3 , 1 ).float().numpy()
_UpperCAmelCase = DiffusionPipeline.numpy_to_pil(snake_case )[0]
return {
"image": input_image,
"generator": generator,
"decoder_num_inference_steps": 2,
"super_res_num_inference_steps": 2,
"output_type": "np",
}
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
_UpperCAmelCase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(**snake_case )
_UpperCAmelCase = output.images
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(
**snake_case , return_dict=snake_case , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array(
[
0.9997,
0.0002,
0.9997,
0.9997,
0.9969,
0.0023,
0.9997,
0.9969,
0.9970,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
_UpperCAmelCase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(**snake_case )
_UpperCAmelCase = output.images
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(
**snake_case , return_dict=snake_case , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.9997, 0.0003, 0.9997, 0.9997, 0.9970, 0.0024, 0.9997, 0.9971, 0.9971] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
_UpperCAmelCase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = [
pipeline_inputs['image'],
pipeline_inputs['image'],
]
_UpperCAmelCase = pipe(**snake_case )
_UpperCAmelCase = output.images
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = [
tuple_pipeline_inputs['image'],
tuple_pipeline_inputs['image'],
]
_UpperCAmelCase = pipe(
**snake_case , return_dict=snake_case , )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (2, 64, 64, 3)
_UpperCAmelCase = np.array(
[
0.9997,
0.9989,
0.0008,
0.0021,
0.9960,
0.0018,
0.0014,
0.0002,
0.9933,
] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1E-2
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = torch.device('cpu' )
class lowercase__ :
'''simple docstring'''
_UpperCAmelCase = 1
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
_UpperCAmelCase = pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(0 )
_UpperCAmelCase = pipe.decoder.dtype
_UpperCAmelCase = 1
_UpperCAmelCase = (
batch_size,
pipe.decoder.config.in_channels,
pipe.decoder.config.sample_size,
pipe.decoder.config.sample_size,
)
_UpperCAmelCase = pipe.prepare_latents(
snake_case , dtype=snake_case , device=snake_case , generator=snake_case , latents=snake_case , scheduler=DummyScheduler() )
_UpperCAmelCase = (
batch_size,
pipe.super_res_first.config.in_channels // 2,
pipe.super_res_first.config.sample_size,
pipe.super_res_first.config.sample_size,
)
_UpperCAmelCase = pipe.prepare_latents(
snake_case , dtype=snake_case , device=snake_case , generator=snake_case , latents=snake_case , scheduler=DummyScheduler() )
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
_UpperCAmelCase = pipe(
**snake_case , decoder_latents=snake_case , super_res_latents=snake_case ).images
_UpperCAmelCase = self.get_dummy_inputs(snake_case , pil_image=snake_case )
# Don't pass image, instead pass embedding
_UpperCAmelCase = pipeline_inputs.pop('image' )
_UpperCAmelCase = pipe.image_encoder(snake_case ).image_embeds
_UpperCAmelCase = pipe(
**snake_case , decoder_latents=snake_case , super_res_latents=snake_case , image_embeddings=snake_case , ).images
        # make sure passing image embeddings manually is identical
assert np.abs(img_out_a - img_out_a ).max() < 1E-4
@skip_mps
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = torch_device == 'cpu'
        # Check is relaxed because there is no torch 2.0 sliced attention added-KV processor
_UpperCAmelCase = 1E-2
self._test_attention_slicing_forward_pass(
test_max_difference=snake_case , expected_max_diff=snake_case )
@skip_mps
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch_device == 'cpu'
_UpperCAmelCase = True
_UpperCAmelCase = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
self._test_inference_batch_single_identical(
test_max_difference=snake_case , relax_max_difference=snake_case , additional_params_copy_to_batched_inputs=snake_case , )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = [
'decoder_num_inference_steps',
'super_res_num_inference_steps',
]
if torch_device == "mps":
# TODO: MPS errors with larger batch sizes
_UpperCAmelCase = [2, 3]
self._test_inference_batch_consistent(
batch_sizes=snake_case , additional_params_copy_to_batched_inputs=snake_case , )
else:
self._test_inference_batch_consistent(
additional_params_copy_to_batched_inputs=snake_case )
@skip_mps
def lowerCamelCase_ ( self ) -> int:
return super().test_dict_tuple_outputs_equivalent()
@skip_mps
def lowerCamelCase_ ( self ) -> List[Any]:
return super().test_save_load_local()
@skip_mps
def lowerCamelCase_ ( self ) -> List[Any]:
return super().test_save_load_optional_components()
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Union[str, Any]:
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/unclip/cat.png' )
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/unclip/karlo_v1_alpha_cat_variation_fp16.npy' )
_UpperCAmelCase = UnCLIPImageVariationPipeline.from_pretrained(
'kakaobrain/karlo-v1-alpha-image-variations' , torch_dtype=torch.floataa )
_UpperCAmelCase = pipeline.to(snake_case )
pipeline.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = pipeline(
snake_case , generator=snake_case , output_type='np' , )
_UpperCAmelCase = output.images[0]
assert image.shape == (256, 256, 3)
assert_mean_pixel_difference(snake_case , snake_case , 15 )
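# `assert_mean_pixel_difference` compares two images by their mean absolute pixel
# error. A self-contained numpy sketch of that style of check (the helper name and
# the sample values below are illustrative):
import numpy as np

def _mean_pixel_difference(image_a, image_b):
    a = np.asarray(image_a, dtype=np.float32)
    b = np.asarray(image_b, dtype=np.float32)
    return np.abs(a - b).mean()

assert _mean_pixel_difference(np.zeros((4, 4, 3)), np.full((4, 4, 3), 10)) == 10.0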
| 24 |
"""simple docstring"""
import argparse
import logging
import pickle
from collections import Counter
logging.basicConfig(
format='''%(asctime)s - %(levelname)s - %(name)s - %(message)s''', datefmt='''%m/%d/%Y %H:%M:%S''', level=logging.INFO
)
lowercase = logging.getLogger(__name__)
if __name__ == "__main__":
lowercase = argparse.ArgumentParser(
description='''Token Counts for smoothing the masking probabilities in MLM (cf XLM/word2vec)'''
)
parser.add_argument(
'''--data_file''', type=str, default='''data/dump.bert-base-uncased.pickle''', help='''The binarized dataset.'''
)
parser.add_argument(
'''--token_counts_dump''', type=str, default='''data/token_counts.bert-base-uncased.pickle''', help='''The dump file.'''
)
parser.add_argument('''--vocab_size''', default=3_05_22, type=int)
lowercase = parser.parse_args()
logger.info(F'''Loading data from {args.data_file}''')
with open(args.data_file, '''rb''') as fp:
lowercase = pickle.load(fp)
logger.info('''Counting occurrences for MLM.''')
lowercase = Counter()
for tk_ids in data:
counter.update(tk_ids)
lowercase = [0] * args.vocab_size
for k, v in counter.items():
lowercase = v
logger.info(F'''Dump to {args.token_counts_dump}''')
with open(args.token_counts_dump, '''wb''') as handle:
pickle.dump(counts, handle, protocol=pickle.HIGHEST_PROTOCOL)
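# A sketch of how such token counts are typically used downstream: converting raw
# frequencies into smoothed masking probabilities, so rare tokens are not starved.
# The 0.7 exponent is an illustrative smoothing choice, not a value from this script.
_counts_example = [1, 10, 100, 1000]
_smoothed = [c ** 0.7 for c in _counts_example]
_total = sum(_smoothed)
_probs = [s / _total for s in _smoothed]
assert abs(sum(_probs) - 1.0) < 1e-9
assert _probs[0] / _probs[-1] > _counts_example[0] / _counts_example[-1]  # rare tokens boosted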
| 24 | 1 |
"""simple docstring"""
import inspect
import warnings
from typing import Any, Dict, Optional, Union
from packaging import version
def deprecate( *args : Any , take_from : Optional[Union[Dict, Any]] = None , standard_warn : bool = True , stacklevel : int = 2 ):
    '''simple docstring'''
    from .. import __version__
    deprecated_kwargs = take_from
    values = ()
    if not isinstance(args[0] , tuple ):
        args = (args,)
    for attribute, version_name, message in args:
        if version.parse(version.parse(__version__ ).base_version ) >= version.parse(version_name ):
            raise ValueError(
                f'The deprecation tuple {(attribute, version_name, message)} should be removed since diffusers\''
                f' version {__version__} is >= {version_name}' )
        warning = None
        if isinstance(deprecated_kwargs , dict ) and attribute in deprecated_kwargs:
            values += (deprecated_kwargs.pop(attribute ),)
            warning = f'The `{attribute}` argument is deprecated and will be removed in version {version_name}.'
        elif hasattr(deprecated_kwargs , attribute ):
            values += (getattr(deprecated_kwargs , attribute ),)
            warning = f'The `{attribute}` attribute is deprecated and will be removed in version {version_name}.'
        elif deprecated_kwargs is None:
            warning = f'`{attribute}` is deprecated and will be removed in version {version_name}.'
        if warning is not None:
            warning = warning + ' ' if standard_warn else ''
            warnings.warn(warning + message , FutureWarning , stacklevel=stacklevel )
    if isinstance(deprecated_kwargs , dict ) and len(deprecated_kwargs ) > 0:
        call_frame = inspect.getouterframes(inspect.currentframe() )[1]
        filename = call_frame.filename
        line_number = call_frame.lineno
        function = call_frame.function
        key , value = next(iter(deprecated_kwargs.items() ) )
        raise TypeError(f'{function} in {filename} line {line_number-1} got an unexpected keyword argument `{key}`' )
    if len(values ) == 0:
        return
    elif len(values ) == 1:
        return values[0]
    return values
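# The helper above imports the package version relatively, so it cannot run outside
# the diffusers tree. This standalone miniature reproduces just its `take_from`
# path to demonstrate the calling convention; all names here are illustrative.
import warnings as _warnings

def _mini_deprecate(attribute, version_name, message, take_from=None):
    if take_from is not None and attribute in take_from:
        _warnings.warn(
            f'The `{attribute}` argument is deprecated and will be removed in version {version_name}. {message}',
            FutureWarning,
        )
        return take_from.pop(attribute)
    return None

def _resize(image, **kwargs):
    size = _mini_deprecate('size', '999.0.0', 'Use `shape` instead.', take_from=kwargs)
    return size

assert _resize(None, size=(2, 2)) == (2, 2)  # warns and pops the deprecated kwarg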
| 24 |
"""simple docstring"""
from itertools import permutations
def is_substring_divisible( num : tuple ) -> bool:
    '''simple docstring'''
    if num[3] % 2 != 0:
        return False
    if (num[2] + num[3] + num[4]) % 3 != 0:
        return False
    if num[5] % 5 != 0:
        return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
        if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
            return False
    return True
def solution( n : int = 10 ) -> int:
    '''simple docstring'''
    return sum(
        int(''.join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F'''{solution() = }''')
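# A quick sanity check against the known example from Project Euler 43: 1406357289
# is 0-9 pandigital and has the substring-divisibility property, so the predicate
# above should accept its digit tuple.
if __name__ == "__main__":
    assert is_substring_divisible((1, 4, 0, 6, 3, 5, 7, 2, 8, 9))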
| 24 | 1 |
"""simple docstring"""
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
lowercase = abspath(join(dirname(dirname(__file__)), '''src'''))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_addoption( parser ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_addoption_shared
    pytest_addoption_shared(parser )
def pytest_terminal_summary( terminalreporter ):
    '''simple docstring'''
    from diffusers.utils.testing_utils import pytest_terminal_summary_main
    make_reports = terminalreporter.config.getoption('--make-reports' )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
lowercase = {
'''configuration_mvp''': ['''MVP_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MvpConfig''', '''MvpOnnxConfig'''],
'''tokenization_mvp''': ['''MvpTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = ['''MvpTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''MVP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MvpForCausalLM''',
'''MvpForConditionalGeneration''',
'''MvpForQuestionAnswering''',
'''MvpForSequenceClassification''',
'''MvpModel''',
'''MvpPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mvp import MVP_PRETRAINED_CONFIG_ARCHIVE_MAP, MvpConfig, MvpOnnxConfig
from .tokenization_mvp import MvpTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mvp_fast import MvpTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mvp import (
MVP_PRETRAINED_MODEL_ARCHIVE_LIST,
MvpForCausalLM,
MvpForConditionalGeneration,
MvpForQuestionAnswering,
MvpForSequenceClassification,
MvpModel,
MvpPreTrainedModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
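# A minimal sketch of the lazy-import idea behind `_LazyModule`, using PEP 562's
# module-level `__getattr__`: the target module is only imported on first access.
# `json` stands in for a heavy optional dependency here.
import importlib

_LAZY_SUBMODULES = {'json'}

def __getattr__(name):
    if name in _LAZY_SUBMODULES:
        return importlib.import_module(name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')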
| 24 | 1 |
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
UNetaDConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = VideoToVideoSDPipeline
_UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({'''video'''} ) - {'''image''', '''width''', '''height'''}
_UpperCAmelCase = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({'''video'''} ) - {'''image'''}
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {'''latents'''}
_UpperCAmelCase = False
# No `output_type`.
_UpperCAmelCase = frozenset(
[
'''num_inference_steps''',
'''generator''',
'''latents''',
'''return_dict''',
'''callback''',
'''callback_steps''',
] )
def lowerCamelCase_ ( self ) -> Optional[int]:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
_UpperCAmelCase = CLIPTextModel(snake_case )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Tuple:
# 3 frames
_UpperCAmelCase = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(snake_case ) ).to(snake_case )
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'video': video,
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'pt',
}
return inputs
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = VideoToVideoSDPipeline(**snake_case )
_UpperCAmelCase = sd_pipe.to(snake_case )
sd_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = 'np'
_UpperCAmelCase = sd_pipe(**snake_case ).frames
_UpperCAmelCase = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
_UpperCAmelCase = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> str:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=snake_case , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowerCamelCase_ ( self ) -> int:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def lowerCamelCase_ ( self ) -> Any:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def lowerCamelCase_ ( self ) -> List[Any]:
pass
def lowerCamelCase_ ( self ) -> str:
return super().test_progress_bar()
@slow
@skip_mps
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.floataa )
pipe.enable_model_cpu_offload()
# 10 frames
_UpperCAmelCase = torch.Generator(device='cpu' ).manual_seed(0 )
_UpperCAmelCase = torch.randn((1, 10, 3, 1024, 576) , generator=snake_case )
_UpperCAmelCase = video.to('cuda' )
_UpperCAmelCase = 'Spiderman is surfing'
_UpperCAmelCase = pipe(snake_case , video=snake_case , generator=snake_case , num_inference_steps=3 , output_type='pt' ).frames
_UpperCAmelCase = np.array([-1.0458984, -1.1279297, -0.9663086, -0.91503906, -0.75097656] )
assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
| 24 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowercase = {
'''configuration_clipseg''': [
'''CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPSegConfig''',
'''CLIPSegTextConfig''',
'''CLIPSegVisionConfig''',
],
'''processing_clipseg''': ['''CLIPSegProcessor'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowercase = [
'''CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPSegModel''',
'''CLIPSegPreTrainedModel''',
'''CLIPSegTextModel''',
'''CLIPSegVisionModel''',
'''CLIPSegForImageSegmentation''',
]
if TYPE_CHECKING:
from .configuration_clipseg import (
CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPSegConfig,
CLIPSegTextConfig,
CLIPSegVisionConfig,
)
from .processing_clipseg import CLIPSegProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clipseg import (
CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPSegForImageSegmentation,
CLIPSegModel,
CLIPSegPreTrainedModel,
CLIPSegTextModel,
CLIPSegVisionModel,
)
else:
import sys
lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
| 24 | 1 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = BarthezTokenizer
_UpperCAmelCase = BarthezTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def lowerCamelCase_ ( self ) -> Optional[int]:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case )
_UpperCAmelCase = tokenizer
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case ) , 101122 )
def lowerCamelCase_ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(snake_case )
_UpperCAmelCase = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
# fmt: off
_UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
        # moussaKam/mbarthez is a French model, so we also use French texts.
_UpperCAmelCase = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
| 24 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
lowercase = logging.get_logger(__name__)
lowercase = {
'''microsoft/swin-tiny-patch4-window7-224''': (
'''https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json'''
),
# See all Swin models at https://huggingface.co/models?filter=swin
}
class lowercase__ ( A, A ):
'''simple docstring'''
_UpperCAmelCase = '''swin'''
_UpperCAmelCase = {
'''num_attention_heads''': '''num_heads''',
'''num_hidden_layers''': '''num_layers''',
}
def __init__( self , snake_case=224 , snake_case=4 , snake_case=3 , snake_case=96 , snake_case=[2, 2, 6, 2] , snake_case=[3, 6, 12, 24] , snake_case=7 , snake_case=4.0 , snake_case=True , snake_case=0.0 , snake_case=0.0 , snake_case=0.1 , snake_case="gelu" , snake_case=False , snake_case=0.02 , snake_case=1E-5 , snake_case=32 , snake_case=None , snake_case=None , **snake_case , ) -> List[Any]:
super().__init__(**snake_case )
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(snake_case )
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = encoder_stride
# we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(snake_case ) - 1) )
_UpperCAmelCase = ['stem'] + [f'stage{idx}' for idx in range(1 , len(snake_case ) + 1 )]
_UpperCAmelCase , _UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=snake_case , out_indices=snake_case , stage_names=self.stage_names )
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = version.parse('''1.11''' )
@property
def lowerCamelCase_ ( self ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def lowerCamelCase_ ( self ) -> float:
return 1E-4
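# The `hidden_size` computed in the config above doubles the embedding dimension
# once per stage after the first. A standalone check with the Swin-T defaults used
# here (embed_dim=96, depths=[2, 2, 6, 2]):
_embed_dim, _depths = 96, [2, 2, 6, 2]
_stage_dims = [_embed_dim * 2 ** i for i in range(len(_depths))]
assert _stage_dims == [96, 192, 384, 768]
assert int(_embed_dim * 2 ** (len(_depths) - 1)) == 768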
| 24 | 1 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
PNDMScheduler,
StableDiffusionLDMaDPipeline,
UNetaDConditionModel,
)
from diffusers.utils import nightly, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
enable_full_determinism()
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = StableDiffusionLDMaDPipeline
_UpperCAmelCase = TEXT_TO_IMAGE_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_BATCH_PARAMS
_UpperCAmelCase = TEXT_TO_IMAGE_IMAGE_PARAMS
def lowerCamelCase_ ( self ) -> str:
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , )
_UpperCAmelCase = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=snake_case , set_alpha_to_one=snake_case , )
torch.manual_seed(0 )
_UpperCAmelCase = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=6 , out_channels=6 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , )
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , )
_UpperCAmelCase = CLIPTextModel(snake_case )
_UpperCAmelCase = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
_UpperCAmelCase = {
'unet': unet,
'scheduler': scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def lowerCamelCase_ ( self , snake_case , snake_case=0 ) -> Dict:
if str(snake_case ).startswith('mps' ):
_UpperCAmelCase = torch.manual_seed(snake_case )
else:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = {
'prompt': 'A painting of a squirrel eating a burger',
'generator': generator,
'num_inference_steps': 2,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionLDMaDPipeline(**snake_case )
_UpperCAmelCase = ldmad_pipe.to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb[0, -3:, -3:, -1]
_UpperCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCAmelCase = np.array(
[0.37338176, 0.70247, 0.74203193, 0.51643604, 0.58256793, 0.60932136, 0.4181095, 0.48355877, 0.46535262] )
_UpperCAmelCase = np.array([103.46727, 85.812004, 87.849236] )
assert np.abs(image_slice_rgb.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(image_slice_depth.flatten() - expected_slice_depth ).max() < 1E-2
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = StableDiffusionLDMaDPipeline(**snake_case )
_UpperCAmelCase = ldmad_pipe.to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = 3 * [inputs['prompt']]
# forward
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
_UpperCAmelCase = depth_slice_a[0, -3:, -1]
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = 3 * [inputs.pop('prompt' )]
_UpperCAmelCase = ldmad_pipe.tokenizer(
snake_case , padding='max_length' , max_length=ldmad_pipe.tokenizer.model_max_length , truncation=snake_case , return_tensors='pt' , )
_UpperCAmelCase = text_inputs['input_ids'].to(snake_case )
_UpperCAmelCase = ldmad_pipe.text_encoder(snake_case )[0]
_UpperCAmelCase = prompt_embeds
# forward
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb_slice_a[0, -3:, -3:, -1]
_UpperCAmelCase = depth_slice_a[0, -3:, -1]
assert np.abs(rgb_slice_a.flatten() - rgb_slice_a.flatten() ).max() < 1E-4
assert np.abs(depth_slice_a.flatten() - depth_slice_a.flatten() ).max() < 1E-4
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = 'cpu' # ensure determinism for the device-dependent torch.Generator
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = PNDMScheduler(skip_prk_steps=snake_case )
_UpperCAmelCase = StableDiffusionLDMaDPipeline(**snake_case )
_UpperCAmelCase = ldmad_pipe.to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = 'french fries'
_UpperCAmelCase = ldmad_pipe(**snake_case , negative_prompt=snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb[0, -3:, -3:, -1]
_UpperCAmelCase = depth[0, -3:, -1]
assert rgb.shape == (1, 64, 64, 3)
assert depth.shape == (1, 64, 64)
_UpperCAmelCase = np.array(
[0.37044, 0.71811503, 0.7223251, 0.48603675, 0.5638391, 0.6364948, 0.42833704, 0.4901315, 0.47926217] )
_UpperCAmelCase = np.array([107.84738, 84.62802, 89.962135] )
assert np.abs(rgb_slice.flatten() - expected_slice_rgb ).max() < 1E-2
assert np.abs(depth_slice.flatten() - expected_slice_depth ).max() < 1E-2
@slow
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self , snake_case , snake_case="cpu" , snake_case=torch.floataa , snake_case=0 ) -> Union[str, Any]:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = np.random.RandomState(snake_case ).standard_normal((1, 4, 64, 64) )
_UpperCAmelCase = torch.from_numpy(snake_case ).to(device=snake_case , dtype=snake_case )
_UpperCAmelCase = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 3,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' )
_UpperCAmelCase = ldmad_pipe.to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_inputs(snake_case )
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = rgb[0, -3:, -3:, -1].flatten()
_UpperCAmelCase = rgb[0, -3:, -1].flatten()
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512)
_UpperCAmelCase = np.array(
[0.53805465, 0.56707305, 0.5486515, 0.57012236, 0.5814511, 0.56253487, 0.54843014, 0.55092263, 0.6459706] )
_UpperCAmelCase = np.array(
[0.9263781, 0.6678672, 0.5486515, 0.92202145, 0.67831135, 0.56253487, 0.9241694, 0.7551478, 0.6459706] )
assert np.abs(rgb_slice - expected_slice_rgb ).max() < 3E-3
assert np.abs(depth_slice - expected_slice_depth ).max() < 3E-3
@nightly
@require_torch_gpu
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self , snake_case , snake_case="cpu" , snake_case=torch.floataa , snake_case=0 ) -> List[str]:
_UpperCAmelCase = torch.Generator(device=snake_case ).manual_seed(snake_case )
_UpperCAmelCase = np.random.RandomState(snake_case ).standard_normal((1, 4, 64, 64) )
_UpperCAmelCase = torch.from_numpy(snake_case ).to(device=snake_case , dtype=snake_case )
_UpperCAmelCase = {
'prompt': 'a photograph of an astronaut riding a horse',
'latents': latents,
'generator': generator,
'num_inference_steps': 50,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d' ).to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_inputs(snake_case )
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = 0.495586
_UpperCAmelCase = 0.33795515
_UpperCAmelCase = 112.48518
_UpperCAmelCase = 98.489746
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = StableDiffusionLDMaDPipeline.from_pretrained('Intel/ldm3d-4c' ).to(snake_case )
ldmad_pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_inputs(snake_case )
_UpperCAmelCase = ldmad_pipe(**snake_case )
_UpperCAmelCase , _UpperCAmelCase = output.rgb, output.depth
_UpperCAmelCase = 0.4194127
_UpperCAmelCase = 0.35375586
_UpperCAmelCase = 0.5638502
_UpperCAmelCase = 0.34686103
assert rgb.shape == (1, 512, 512, 3)
assert depth.shape == (1, 512, 512, 1)
assert np.abs(expected_rgb_mean - rgb.mean() ) < 1E-3
assert np.abs(expected_rgb_std - rgb.std() ) < 1E-3
assert np.abs(expected_depth_mean - depth.mean() ) < 1E-3
assert np.abs(expected_depth_std - depth.std() ) < 1E-3
| 24 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class lowercase__ ( nn.Module ):
'''simple docstring'''
def __init__( self , snake_case = 16 , snake_case = 88 , snake_case = None , snake_case = 1 , snake_case = 0.0 , snake_case = 32 , snake_case = None , snake_case = False , snake_case = None , snake_case = None , snake_case = "geglu" , snake_case = None , ) -> str:
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=snake_case , attention_head_dim=snake_case , in_channels=snake_case , num_layers=snake_case , dropout=snake_case , norm_num_groups=snake_case , cross_attention_dim=snake_case , attention_bias=snake_case , sample_size=snake_case , num_vector_embeds=snake_case , activation_fn=snake_case , num_embeds_ada_norm=snake_case , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 257]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case=None , snake_case=None , snake_case=None , snake_case = True , ) -> Any:
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
snake_case , encoder_hidden_states=snake_case , timestep=snake_case , cross_attention_kwargs=snake_case , return_dict=snake_case , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=snake_case )
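# A tensor-free sketch of the mixing rule at the end of `forward` above: the two
# branch residuals are blended with `mix_ratio`, then the input is added back.
# Plain floats stand in for hidden-state tensors here.
def _mix_branches(input_state, residual_a, residual_b, mix_ratio=0.5):
    mixed = residual_a * mix_ratio + residual_b * (1 - mix_ratio)
    return mixed + input_state

assert abs(_mix_branches(1.0, 0.2, 0.4, mix_ratio=0.5) - 1.3) < 1e-9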
| 24 | 1 |
"""simple docstring"""
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df , partition_order ):
    '''simple docstring'''
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f'SPARK_PARTITION_ID() = {part_id}' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((f'{part_id}_{row_idx}', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_UpperCAmelCase = spark.range(100 ).repartition(1 )
_UpperCAmelCase = Spark(A )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_UpperCAmelCase = spark.range(10 ).repartition(2 )
_UpperCAmelCase = [1, 0]
_UpperCAmelCase = _generate_iterable_examples(A , A ) # Reverse the partitions.
_UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , A )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
_UpperCAmelCase , _UpperCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_UpperCAmelCase = spark.range(10 ).repartition(1 )
_UpperCAmelCase = SparkExamplesIterable(A )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(A ):
assert row_id == f'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_UpperCAmelCase = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch('numpy.random.Generator' ) as generator_mock:
_UpperCAmelCase = lambda A : x.reverse()
_UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [2, 1, 0] )
_UpperCAmelCase = SparkExamplesIterable(A ).shuffle_data_sources(A )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(A ):
_UpperCAmelCase , _UpperCAmelCase = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_UpperCAmelCase = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
_UpperCAmelCase = SparkExamplesIterable(A ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
_UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [0, 2] )
for i, (row_id, row_dict) in enumerate(A ):
_UpperCAmelCase , _UpperCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
_UpperCAmelCase = SparkExamplesIterable(A ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
_UpperCAmelCase = _get_expected_row_ids_and_row_dicts_for_partition_order(A , [1, 3] )
for i, (row_id, row_dict) in enumerate(A ):
_UpperCAmelCase , _UpperCAmelCase = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = pyspark.sql.SparkSession.builder.master('local[*]' ).appName('pyspark' ).getOrCreate()
_UpperCAmelCase = spark.range(100 ).repartition(1 )
_UpperCAmelCase = Spark(A )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
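# The worker sharding asserted above follows a simple round-robin rule: worker `k`
# of `n` takes every n-th partition starting at index k. A standalone sketch:
_partitions = list(range(4))
assert _partitions[0::2] == [0, 2]  # worker_id=0, num_workers=2
assert _partitions[1::2] == [1, 3]  # worker_id=1, num_workers=2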
| 24 |
"""simple docstring"""
import inspect
import unittest
from math import floor
from transformers import CvtConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import CvtForImageClassification, CvtModel
from transformers.models.cvt.modeling_cvt import CVT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(snake_case , 'embed_dim' ) )
self.parent.assertTrue(hasattr(snake_case , 'num_heads' ) )
class lowercase__ :
'''simple docstring'''
def __init__( self , snake_case , snake_case=13 , snake_case=64 , snake_case=3 , snake_case=[16, 48, 96] , snake_case=[1, 3, 6] , snake_case=[1, 2, 10] , snake_case=[7, 3, 3] , snake_case=[4, 2, 2] , snake_case=[2, 1, 1] , snake_case=[2, 2, 2] , snake_case=[False, False, True] , snake_case=[0.0, 0.0, 0.0] , snake_case=0.02 , snake_case=1E-12 , snake_case=True , snake_case=True , snake_case=2 , ) -> Tuple:
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_sizes
_UpperCAmelCase = patch_stride
_UpperCAmelCase = patch_padding
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = num_heads
_UpperCAmelCase = stride_kv
_UpperCAmelCase = depth
_UpperCAmelCase = cls_token
_UpperCAmelCase = attention_drop_rate
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def lowerCamelCase_ ( self ) -> List[str]:
return CvtConfig(
image_size=self.image_size , num_labels=self.num_labels , num_channels=self.num_channels , embed_dim=self.embed_dim , num_heads=self.num_heads , patch_sizes=self.patch_sizes , patch_padding=self.patch_padding , patch_stride=self.patch_stride , stride_kv=self.stride_kv , depth=self.depth , cls_token=self.cls_token , attention_drop_rate=self.attention_drop_rate , initializer_range=self.initializer_range , )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[int]:
_UpperCAmelCase = CvtModel(config=snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case )
_UpperCAmelCase = (self.image_size, self.image_size)
_UpperCAmelCase , _UpperCAmelCase = image_size[0], image_size[1]
for i in range(len(self.depth ) ):
_UpperCAmelCase = floor(((height + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
_UpperCAmelCase = floor(((width + 2 * self.patch_padding[i] - self.patch_sizes[i]) / self.patch_stride[i]) + 1 )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.embed_dim[-1], height, width) )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Optional[Any]:
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = CvtForImageClassification(snake_case )
model.to(snake_case )
model.eval()
_UpperCAmelCase = model(snake_case , labels=snake_case )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowercase__ ( A, A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = (CvtModel, CvtForImageClassification) if is_torch_available() else ()
_UpperCAmelCase = (
{'''feature-extraction''': CvtModel, '''image-classification''': CvtForImageClassification}
if is_torch_available()
else {}
)
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
_UpperCAmelCase = False
def lowerCamelCase_ ( self ) -> Union[str, Any]:
_UpperCAmelCase = CvtModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=snake_case , has_text_modality=snake_case , hidden_size=37 )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return
@unittest.skip(reason='Cvt does not output attentions' )
def lowerCamelCase_ ( self ) -> str:
pass
@unittest.skip(reason='Cvt does not use inputs_embeds' )
def lowerCamelCase_ ( self ) -> int:
pass
@unittest.skip(reason='Cvt does not support input and output embeddings' )
def lowerCamelCase_ ( self ) -> Union[str, Any]:
pass
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(snake_case )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['pixel_values']
self.assertListEqual(arg_names[:1] , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
def check_hidden_states_output(snake_case , snake_case , snake_case ):
_UpperCAmelCase = model_class(snake_case )
model.to(snake_case )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(snake_case , snake_case ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = len(self.model_tester.depth )
self.assertEqual(len(snake_case ) , snake_case )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) , [
self.model_tester.embed_dim[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] , )
_UpperCAmelCase , _UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(snake_case , snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*snake_case )
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def lowerCamelCase_ ( self ) -> Dict:
pass
@slow
def lowerCamelCase_ ( self ) -> Dict:
for model_name in CVT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = CvtModel.from_pretrained(snake_case )
self.assertIsNotNone(snake_case )
def UpperCAmelCase ( ):
'''simple docstring'''
_UpperCAmelCase = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def lowerCamelCase_ ( self ) -> List[Any]:
return AutoImageProcessor.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = CvtForImageClassification.from_pretrained(CVT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(snake_case )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=snake_case , return_tensors='pt' ).to(snake_case )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**snake_case )
# verify the logits
_UpperCAmelCase = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape , snake_case )
_UpperCAmelCase = torch.tensor([0.9285, 0.9015, -0.3150] ).to(snake_case )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , snake_case , atol=1E-4 ) )
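

# A small sketch (not part of the original tests) of the convolution output-size rule the
# shape assertions above rely on: out = floor((in + 2 * padding - kernel) / stride) + 1.
def conv_output_size(in_size: int, kernel: int, stride: int, padding: int) -> int:
    return (in_size + 2 * padding - kernel) // stride + 1


# With the tester defaults above (image_size=64, first stage patch 7 / stride 4 / padding 2):
# conv_output_size(64, 7, 4, 2) == 16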
| 24 | 1 |
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES, BertTokenizer
from transformers.testing_utils import require_tokenizers, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor, ViTImageProcessor
@require_tokenizers
@require_vision
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
    def setUp(self):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "want", "##want", "##ed", "wa", "un", "runn", "##ing", ",", "low", "lowest"]
        # fmt: on
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        image_processor_map = {
            "do_resize": True,
            "size": {"height": 18, "width": 18},
            "do_normalize": True,
            "image_mean": [0.5, 0.5, 0.5],
            "image_std": [0.5, 0.5, 0.5],
        }
        self.image_processor_file = os.path.join(self.tmpdirname, IMAGE_PROCESSOR_NAME)
        with open(self.image_processor_file, "w", encoding="utf-8") as fp:
            json.dump(image_processor_map, fp)

    def get_tokenizer(self, **kwargs):
        return BertTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_image_processor(self, **kwargs):
        return ViTImageProcessor.from_pretrained(self.tmpdirname, **kwargs)

    def tearDown(self):
        shutil.rmtree(self.tmpdirname)

    def prepare_image_inputs(self):
        """Create a list of PIL images to feed the processor."""
        image_inputs = [np.random.randint(255, size=(3, 30, 400), dtype=np.uint8)]
        image_inputs = [Image.fromarray(np.moveaxis(x, 0, -1)) for x in image_inputs]
        return image_inputs
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def lowerCamelCase_ ( self ) -> Optional[int]:
_UpperCAmelCase = VisionTextDualEncoderProcessor(
tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(bos_token='(BOS)' , eos_token='(EOS)' )
_UpperCAmelCase = self.get_image_processor(do_normalize=snake_case , padding_value=1.0 )
_UpperCAmelCase = VisionTextDualEncoderProcessor.from_pretrained(
self.tmpdirname , bos_token='(BOS)' , eos_token='(EOS)' , do_normalize=snake_case , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , (BertTokenizer, BertTokenizerFast) )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , snake_case )
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(snake_case , return_tensors='np' )
_UpperCAmelCase = processor(images=snake_case , return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1E-2 )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = processor(text=snake_case )
_UpperCAmelCase = tokenizer(snake_case )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def lowerCamelCase_ ( self ) -> Tuple:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , ['input_ids', 'token_type_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with self.assertRaises(snake_case ):
processor()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(snake_case )
_UpperCAmelCase = tokenizer.batch_decode(snake_case )
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = VisionTextDualEncoderProcessor(tokenizer=snake_case , image_processor=snake_case )
_UpperCAmelCase = 'lower newer'
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=snake_case , images=snake_case )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
| 24 |
"""simple docstring"""
from __future__ import annotations
from cmath import sqrt
def quadratic_roots(a: int, b: int, c: int) -> tuple[complex, complex]:
    """Return the two roots of a*x^2 + b*x + c = 0 (real parts only when the roots are real)."""
    if a == 0:
        raise ValueError("Coefficient 'a' must not be zero.")
    delta = b * b - 4 * a * c
    root_1 = (-b + sqrt(delta)) / (2 * a)
    root_2 = (-b - sqrt(delta)) / (2 * a)
    return (
        root_1.real if not root_1.imag else root_1,
        root_2.real if not root_2.imag else root_2,
    )


def main() -> None:
    solution_1, solution_2 = quadratic_roots(a=5, b=6, c=1)
    print(f"The solutions are: {solution_1} and {solution_2}")


if __name__ == "__main__":
    main()
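
# Worked check of the closed form x = (-b ± sqrt(b^2 - 4ac)) / (2a) used above:
# for 5x^2 + 6x + 1 = (5x + 1)(x + 1), the discriminant is 6^2 - 4*5*1 = 16, so
# x = (-6 ± 4) / 10, i.e. main() prints the roots -0.2 and -1.0. Another spot check:
# quadratic_roots(1, -3, 2) returns (2.0, 1.0) since x^2 - 3x + 2 = (x - 1)(x - 2).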
| 24 | 1 |
"""simple docstring"""
def partition(m: int) -> int:
    """Count the integer partitions of m using dynamic programming."""
    memo = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1, m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]


if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
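
# Spot checks for the recurrence above (memo[n][k] counts the partitions of n into parts
# of size at most k + 1): the partitions of 3 are {3, 2+1, 1+1+1}, so partition(3) == 3;
# similarly partition(4) == 5 and partition(5) == 7.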
| 24 |
"""simple docstring"""
import unittest
from transformers import BarthezTokenizer, BarthezTokenizerFast, BatchEncoding
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
@require_sentencepiece
@slow # see https://github.com/huggingface/transformers/issues/11457
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = BarthezTokenizer
_UpperCAmelCase = BarthezTokenizerFast
_UpperCAmelCase = True
_UpperCAmelCase = True
def lowerCamelCase_ ( self ) -> Optional[int]:
super().setUp()
_UpperCAmelCase = BarthezTokenizerFast.from_pretrained('moussaKam/mbarthez' )
tokenizer.save_pretrained(self.tmpdirname )
tokenizer.save_pretrained(self.tmpdirname , legacy_format=snake_case )
_UpperCAmelCase = tokenizer
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = 1
self.assertEqual(self.get_tokenizer()._convert_token_to_id(snake_case ) , snake_case )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(snake_case ) , snake_case )
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '<s>' )
self.assertEqual(vocab_keys[1] , '<pad>' )
self.assertEqual(vocab_keys[-1] , '<mask>' )
self.assertEqual(len(snake_case ) , 101122 )
def lowerCamelCase_ ( self ) -> List[Any]:
self.assertEqual(self.get_tokenizer().vocab_size , 101122 )
@require_torch
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
_UpperCAmelCase = [0, 57, 3018, 70307, 91, 2]
_UpperCAmelCase = self.tokenizer(
snake_case , max_length=len(snake_case ) , padding=snake_case , truncation=snake_case , return_tensors='pt' )
self.assertIsInstance(snake_case , snake_case )
self.assertEqual((2, 6) , batch.input_ids.shape )
self.assertEqual((2, 6) , batch.attention_mask.shape )
_UpperCAmelCase = batch.input_ids.tolist()[0]
self.assertListEqual(snake_case , snake_case )
def lowerCamelCase_ ( self ) -> Optional[Any]:
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = 'I was born in 92000, and this is falsé.'
_UpperCAmelCase = tokenizer.tokenize(snake_case )
_UpperCAmelCase = rust_tokenizer.tokenize(snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = tokenizer.encode(snake_case , add_special_tokens=snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case , add_special_tokens=snake_case )
self.assertListEqual(snake_case , snake_case )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(snake_case )
_UpperCAmelCase = rust_tokenizer.encode(snake_case )
self.assertListEqual(snake_case , snake_case )
@slow
def lowerCamelCase_ ( self ) -> Optional[int]:
# fmt: off
_UpperCAmelCase = {'input_ids': [[0, 490, 14328, 4507, 354, 47, 43669, 95, 25, 78117, 20215, 19779, 190, 22, 400, 4, 35343, 80310, 603, 86, 24937, 105, 33438, 94762, 196, 39642, 7, 15, 15933, 173, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [0, 10534, 87, 25, 66, 3358, 196, 55289, 8, 82961, 81, 2204, 75203, 7, 15, 763, 12956, 216, 178, 14328, 9595, 1377, 69693, 7, 448, 71021, 196, 18106, 1437, 13974, 108, 9083, 4, 49315, 7, 39, 86, 1326, 2793, 46333, 4, 448, 196, 74588, 7, 49315, 7, 39, 21, 822, 38470, 74, 21, 66723, 62480, 8, 22050, 5, 2]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# moussaKam/mbarthez is a french model. So we also use french texts.
_UpperCAmelCase = [
'Le transformeur est un modèle d\'apprentissage profond introduit en 2017, '
'utilisé principalement dans le domaine du traitement automatique des langues (TAL).',
'À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus '
'pour gérer des données séquentielles, telles que le langage naturel, pour des tâches '
'telles que la traduction et la synthèse de texte.',
]
self.tokenizer_integration_test_util(
expected_encoding=snake_case , model_name='moussaKam/mbarthez' , revision='c2e4ecbca5e3cd2c37fe1ac285ca4fbdf1366fb6' , sequences=snake_case , )
| 24 | 1 |
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
logger = logging.get_logger(__name__)

RESNET_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/resnet-50": "https://huggingface.co/microsoft/resnet-50/blob/main/config.json",
}


class ResNetConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "resnet"
    layer_types = ["basic", "bottleneck"]

    def __init__(
        self,
        num_channels=3,
        embedding_size=64,
        hidden_sizes=[256, 512, 1024, 2048],
        depths=[3, 4, 6, 3],
        layer_type="bottleneck",
        hidden_act="relu",
        downsample_in_first_stage=False,
        out_features=None,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        if layer_type not in self.layer_types:
            raise ValueError(f"layer_type={layer_type} is not one of {','.join(self.layer_types)}")
        self.num_channels = num_channels
        self.embedding_size = embedding_size
        self.hidden_sizes = hidden_sizes
        self.depths = depths
        self.layer_type = layer_type
        self.hidden_act = hidden_act
        self.downsample_in_first_stage = downsample_in_first_stage
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class ResNetOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-3
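

# A brief usage sketch (values illustrative, not from the original file): `out_features`
# picks backbone outputs from the auto-generated stage names.
#
#   config = ResNetConfig(depths=[2, 2, 2, 2], out_features=["stage2", "stage4"])
#   assert config.stage_names == ["stem", "stage1", "stage2", "stage3", "stage4"]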
| 24 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from diffusers import AutoencoderKL, DDIMScheduler, DiTPipeline, DPMSolverMultistepScheduler, Transformer2DModel
from diffusers.utils import is_xformers_available, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS,
CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = DiTPipeline
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_PARAMS
_UpperCAmelCase = PipelineTesterMixin.required_optional_params - {
'''latents''',
'''num_images_per_prompt''',
'''callback''',
'''callback_steps''',
}
_UpperCAmelCase = CLASS_CONDITIONED_IMAGE_GENERATION_BATCH_PARAMS
_UpperCAmelCase = False
    def get_dummy_components(self):
        torch.manual_seed(0)
        transformer = Transformer2DModel(
            sample_size=16,
            num_layers=2,
            patch_size=4,
            attention_head_dim=8,
            num_attention_heads=2,
            in_channels=4,
            out_channels=8,
            attention_bias=True,
            activation_fn="gelu-approximate",
            num_embeds_ada_norm=1000,
            norm_type="ada_norm_zero",
            norm_elementwise_affine=False,
        )
        vae = AutoencoderKL()
        scheduler = DDIMScheduler()
        components = {"transformer": transformer.eval(), "vae": vae.eval(), "scheduler": scheduler}
        return components

    def get_dummy_inputs(self, device, seed=0):
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "class_labels": [1],
            "generator": generator,
            "num_inference_steps": 2,
            "output_type": "numpy",
        }
        return inputs
def lowerCamelCase_ ( self ) -> List[Any]:
_UpperCAmelCase = 'cpu'
_UpperCAmelCase = self.get_dummy_components()
_UpperCAmelCase = self.pipeline_class(**snake_case )
pipe.to(snake_case )
pipe.set_progress_bar_config(disable=snake_case )
_UpperCAmelCase = self.get_dummy_inputs(snake_case )
_UpperCAmelCase = pipe(**snake_case ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
self.assertEqual(image.shape , (1, 16, 16, 3) )
_UpperCAmelCase = np.array([0.2946, 0.6601, 0.4329, 0.3296, 0.4144, 0.5319, 0.7273, 0.5013, 0.4457] )
_UpperCAmelCase = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(snake_case , 1E-3 )
def lowerCamelCase_ ( self ) -> Any:
self._test_inference_batch_single_identical(relax_max_difference=snake_case , expected_max_diff=1E-3 )
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def lowerCamelCase_ ( self ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
@require_torch_gpu
@slow
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def lowerCamelCase_ ( self ) -> int:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-256' )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella', 'white shark', 'white wolf']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=40 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
f'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/dit/{word}.npy' )
assert np.abs((expected_image - image).max() ) < 1E-2
def lowerCamelCase_ ( self ) -> int:
_UpperCAmelCase = DiTPipeline.from_pretrained('facebook/DiT-XL-2-512' )
_UpperCAmelCase = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.to('cuda' )
_UpperCAmelCase = ['vase', 'umbrella']
_UpperCAmelCase = pipe.get_label_ids(snake_case )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(snake_case , generator=snake_case , num_inference_steps=25 , output_type='np' ).images
for word, image in zip(snake_case , snake_case ):
_UpperCAmelCase = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
f'/dit/{word}_512.npy' )
assert np.abs((expected_image - image).max() ) < 1E-1
| 24 | 1 |
"""simple docstring"""
import pprint
import requests
API_ENDPOINT_URL = "https://zenquotes.io/api"


def quote_of_the_day() -> list:
    return requests.get(API_ENDPOINT_URL + "/today").json()


def random_quotes() -> list:
    return requests.get(API_ENDPOINT_URL + "/random").json()


if __name__ == "__main__":
    response = random_quotes()
    pprint.pprint(response)
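

# An optional hardening sketch (not in the original; the 10-second timeout is an
# arbitrary choice): fail loudly on HTTP errors instead of decoding an error page.
def random_quotes_checked() -> list:
    response = requests.get(API_ENDPOINT_URL + "/random", timeout=10)
    response.raise_for_status()
    return response.json()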
| 24 |
"""simple docstring"""
def sum_of_digits(n: int) -> int:
    """Iteratively sum the decimal digits of n (sign ignored)."""
    n = abs(n)
    res = 0
    while n > 0:
        res += n % 10
        n //= 10
    return res


def sum_of_digits_recursion(n: int) -> int:
    """Recursively sum the decimal digits of n (sign ignored)."""
    n = abs(n)
    return n if n < 10 else n % 10 + sum_of_digits_recursion(n // 10)


def sum_of_digits_compact(n: int) -> int:
    """Sum the decimal digits of n via string conversion."""
    return sum(int(c) for c in str(abs(n)))


def benchmark() -> None:
    """Time the three implementations on inputs of increasing size."""
    from collections.abc import Callable
    from timeit import timeit

    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f"{func.__name__}({value})"
        timing = timeit(f"__main__.{call}", setup="import __main__")
        print(f"{call:56} = {func(value)} -- {timing:.4f} seconds")

    for value in (26_2144, 1125_8999_0684_2624, 126_7650_6002_2822_9401_4967_0320_5376):
        for func in (sum_of_digits, sum_of_digits_recursion, sum_of_digits_compact):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
import doctest
doctest.testmod()
benchmark()
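
# Quick equivalence checks (verifiable by hand): all three implementations agree, e.g.
# sum_of_digits(12345) == sum_of_digits_recursion(12345) == sum_of_digits_compact(12345) == 15,
# and the sign is ignored: sum_of_digits(-12345) == 15.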
| 24 | 1 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self , snake_case , snake_case=7 , snake_case=3 , snake_case=18 , snake_case=30 , snake_case=400 , snake_case=True , snake_case=None , snake_case=True , snake_case=None , snake_case=True , snake_case=[0.5, 0.5, 0.5] , snake_case=[0.5, 0.5, 0.5] , ) -> Optional[int]:
_UpperCAmelCase = size if size is not None else {'shortest_edge': 18}
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 18, 'width': 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def lowerCamelCase_ ( self ) -> Union[str, Any]:
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class lowercase__ ( A, unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = LevitImageProcessor if is_vision_available() else None
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = LevitImageProcessingTester(self )
@property
def lowerCamelCase_ ( self ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def lowerCamelCase_ ( self ) -> List[str]:
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(snake_case , 'image_mean' ) )
self.assertTrue(hasattr(snake_case , 'image_std' ) )
self.assertTrue(hasattr(snake_case , 'do_normalize' ) )
self.assertTrue(hasattr(snake_case , 'do_resize' ) )
self.assertTrue(hasattr(snake_case , 'do_center_crop' ) )
self.assertTrue(hasattr(snake_case , 'size' ) )
def lowerCamelCase_ ( self ) -> str:
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {'shortest_edge': 18} )
self.assertEqual(image_processor.crop_size , {'height': 18, 'width': 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {'shortest_edge': 42} )
self.assertEqual(image_processor.crop_size , {'height': 84, 'width': 84} )
def lowerCamelCase_ ( self ) -> Tuple:
pass
def lowerCamelCase_ ( self ) -> Optional[int]:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self ) -> Tuple:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , numpify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
def lowerCamelCase_ ( self ) -> Tuple:
# Initialize image_processing
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=snake_case , torchify=snake_case )
for image in image_inputs:
self.assertIsInstance(snake_case , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
# Test batched
_UpperCAmelCase = image_processing(snake_case , return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size['height'],
self.image_processor_tester.crop_size['width'],
) , )
| 24 |
"""simple docstring"""
from __future__ import annotations
def generate_all_combinations(n: int, k: int) -> list[list[int]]:
    result: list[list[int]] = []
    create_all_state(1, n, k, [], result)
    return result


def create_all_state(
    increment: int,
    total_number: int,
    level: int,
    current_list: list[int],
    total_list: list[list[int]],
) -> None:
    if level == 0:
        total_list.append(current_list[:])
        return
    for i in range(increment, total_number - level + 2):
        current_list.append(i)
        create_all_state(i + 1, total_number, level - 1, current_list, total_list)
        current_list.pop()


def print_all_state(total_list: list[list[int]]) -> None:
    for i in total_list:
        print(*i)


if __name__ == "__main__":
    n = 4
    k = 2
    total_list = generate_all_combinations(n, k)
    print_all_state(total_list)
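
# Cross-check against the standard library (same lexicographic ordering, hence a handy test):
#
#   from itertools import combinations
#   assert generate_all_combinations(4, 2) == [list(c) for c in combinations(range(1, 5), 2)]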
| 24 | 1 |
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_DEFAULT_MEAN,
IMAGENET_DEFAULT_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_batched,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowercase = logging.get_logger(__name__)
class lowercase__ ( A ):
'''simple docstring'''
_UpperCAmelCase = ['''pixel_values''']
def __init__( self , snake_case = True , snake_case = None , snake_case = PILImageResampling.BICUBIC , snake_case = True , snake_case = True , snake_case = 1 / 255 , snake_case = None , snake_case = True , snake_case = None , snake_case = None , **snake_case , ) -> None:
super().__init__(**snake_case )
_UpperCAmelCase = size if size is not None else {'height': 224, 'width': 224}
_UpperCAmelCase = get_size_dict(snake_case )
_UpperCAmelCase = crop_size if crop_size is not None else {'height': 224, 'width': 224}
_UpperCAmelCase = get_size_dict(snake_case , default_to_square=snake_case , param_name='crop_size' )
_UpperCAmelCase = do_resize
_UpperCAmelCase = do_rescale
_UpperCAmelCase = do_normalize
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_DEFAULT_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_DEFAULT_STD
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case = PILImageResampling.BILINEAR , snake_case = None , **snake_case , ) -> np.ndarray:
_UpperCAmelCase = get_size_dict(snake_case )
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(snake_case , size=size['shortest_edge'] , default_to_square=snake_case )
# size = get_resize_output_image_size(image, size["shortest_edge"], size["longest_edge"])
elif "height" in size and "width" in size:
_UpperCAmelCase = (size['height'], size['width'])
else:
raise ValueError(f'Size must contain \'height\' and \'width\' keys or \'shortest_edge\' key. Got {size.keys()}' )
return resize(snake_case , size=snake_case , resample=snake_case , data_format=snake_case , **snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case = None , **snake_case , ) -> np.ndarray:
_UpperCAmelCase = get_size_dict(snake_case )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(snake_case , size=(size['height'], size['width']) , data_format=snake_case , **snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case = None , **snake_case ) -> np.ndarray:
return rescale(snake_case , scale=snake_case , data_format=snake_case , **snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case , snake_case = None , **snake_case , ) -> np.ndarray:
return normalize(snake_case , mean=snake_case , std=snake_case , data_format=snake_case , **snake_case )
def lowerCamelCase_ ( self , snake_case , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = None , snake_case = ChannelDimension.FIRST , **snake_case , ) -> BatchFeature:
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(snake_case , param_name='crop_size' , default_to_square=snake_case )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(snake_case )
if not is_batched(snake_case ):
_UpperCAmelCase = [images]
if not valid_images(snake_case ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(snake_case ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=snake_case , size=snake_case , resample=snake_case ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=snake_case , size=snake_case ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=snake_case , scale=snake_case ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=snake_case , mean=snake_case , std=snake_case ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(snake_case , snake_case ) for image in images]
_UpperCAmelCase = {'pixel_values': images}
return BatchFeature(data=snake_case , tensor_type=snake_case )
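

# A toy end-to-end sketch (my own illustration; shapes and statistics are assumptions) of
# the transform order `preprocess` applies above: resize -> center_crop -> rescale -> normalize.
# IMAGENET_DEFAULT_MEAN/STD correspond to the usual ImageNet channel statistics.
#
#   import numpy as np
#   image = np.random.randint(0, 256, size=(256, 256, 3), dtype=np.uint8)
#   top = left = (256 - 224) // 2                                          # center_crop to 224x224
#   crop = image[top : top + 224, left : left + 224]
#   scaled = crop.astype(np.float32) / 255.0                               # rescale
#   normalized = (scaled - IMAGENET_DEFAULT_MEAN) / IMAGENET_DEFAULT_STD   # normalize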
| 24 |
"""simple docstring"""
import logging
import os
import sys
from pathlib import Path
from unittest.mock import patch
from parameterized import parameterized
from run_eval import run_generate
from run_eval_search import run_search
from transformers.testing_utils import CaptureStdout, TestCasePlus, slow
from utils import ROUGE_KEYS
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()


def _dump_articles(path: Path, articles: list):
    content = "\n".join(articles)
    Path(path).open("w").writelines(content)


T5_TINY = "patrickvonplaten/t5-tiny-random"
BART_TINY = "sshleifer/bart-tiny-random"
MBART_TINY = "sshleifer/tiny-mbart"

stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
logging.disable(logging.CRITICAL)  # remove noisy download output from tracebacks
class lowercase__ ( A ):
'''simple docstring'''
def lowerCamelCase_ ( self , snake_case ) -> str:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = [' New York (CNN)When Liana Barrientos was 23 years old, she got married in Westchester County.']
_dump_articles(snake_case , snake_case )
_UpperCAmelCase = str(Path(self.get_auto_remove_tmp_dir() ) / 'scores.json' )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {input_file_name}\n {output_file_name}\n --score_path {score_path}\n --task {task}\n --num_beams 2\n --length_penalty 2.0\n '.split()
with patch.object(snake_case , 'argv' , snake_case ):
run_generate()
assert Path(snake_case ).exists()
# os.remove(Path(output_file_name))
def lowerCamelCase_ ( self ) -> str:
self.run_eval_tester(snake_case )
@parameterized.expand([BART_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> List[Any]:
self.run_eval_tester(snake_case )
@parameterized.expand([T5_TINY, MBART_TINY] )
@slow
def lowerCamelCase_ ( self , snake_case ) -> Dict:
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() ) / 'utest_input.source'
_UpperCAmelCase = input_file_name.parent / 'utest_output.txt'
assert not output_file_name.exists()
_UpperCAmelCase = {
'en': ['Machine learning is great, isn\'t it?', 'I like to eat bananas', 'Tomorrow is another great day!'],
'de': [
'Maschinelles Lernen ist großartig, oder?',
'Ich esse gerne Bananen',
'Morgen ist wieder ein toller Tag!',
],
}
_UpperCAmelCase = Path(self.get_auto_remove_tmp_dir() )
_UpperCAmelCase = str(tmp_dir / 'scores.json' )
_UpperCAmelCase = str(tmp_dir / 'val.target' )
_dump_articles(snake_case , text['en'] )
_dump_articles(snake_case , text['de'] )
_UpperCAmelCase = 'translation_en_to_de' if model == T5_TINY else 'summarization'
_UpperCAmelCase = f'\n run_eval_search.py\n {model}\n {str(snake_case )}\n {str(snake_case )}\n --score_path {score_path}\n --reference_path {reference_path}\n --task {task}\n '.split()
testargs.extend(['--search', 'num_beams=1:2 length_penalty=0.9:1.0'] )
with patch.object(snake_case , 'argv' , snake_case ):
with CaptureStdout() as cs:
run_search()
_UpperCAmelCase = [' num_beams | length_penalty', model, 'Best score args']
_UpperCAmelCase = ['Info']
if "translation" in task:
expected_strings.append('bleu' )
else:
expected_strings.extend(snake_case )
for w in expected_strings:
assert w in cs.out
for w in un_expected_strings:
assert w not in cs.out
assert Path(snake_case ).exists()
os.remove(Path(snake_case ) )
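

# A minimal reconstruction (my sketch, not the project's actual parser) of how a --search
# string like "num_beams=1:2 length_penalty=0.9:1.0" expands into the grid of settings the
# test above exercises.
def expand_search_space(search: str) -> list:
    from itertools import product

    keys, value_lists = [], []
    for group in search.split():
        key, values = group.split("=")
        keys.append(key)
        value_lists.append(values.split(":"))
    return [dict(zip(keys, combo)) for combo in product(*value_lists)]


# expand_search_space("num_beams=1:2 length_penalty=0.9:1.0") yields four settings:
# {'num_beams': '1', 'length_penalty': '0.9'}, {'num_beams': '1', 'length_penalty': '1.0'},
# {'num_beams': '2', 'length_penalty': '0.9'}, {'num_beams': '2', 'length_penalty': '1.0'}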
| 24 | 1 |
"""simple docstring"""
from functools import reduce
N = (
'''73167176531330624919225119674426574742355349194934'''
'''96983520312774506326239578318016984801869478851843'''
'''85861560789112949495459501737958331952853208805511'''
'''12540698747158523863050715693290963295227443043557'''
'''66896648950445244523161731856403098711121722383113'''
'''62229893423380308135336276614282806444486645238749'''
'''30358907296290491560440772390713810515859307960866'''
'''70172427121883998797908792274921901699720888093776'''
'''65727333001053367881220235421809751254540594752243'''
'''52584907711670556013604839586446706324415722155397'''
'''53697817977846174064955149290862569321978468622482'''
'''83972241375657056057490261407972968652414535100474'''
'''82166370484403199890008895243450658541227588666881'''
'''16427171479924442928230863465674813919123162824586'''
'''17866458359124566529476545682848912883142607690042'''
'''24219022671055626321111109370544217506941658960408'''
'''07198403850962455444362981230987879927244284909188'''
'''84580156166097919133875499200524063689912560717606'''
'''05886116467109405077541002256983155200055935729725'''
'''71636269561882670428252483600823257530420752963450'''
)
def solution(n: str = N) -> int:
    """Return the greatest product of thirteen adjacent digits in the 1000-digit number."""
    return max(
        # mypy cannot properly interpret reduce
        int(reduce(lambda x, y: str(int(x) * int(y)), n[i : i + 13]))
        for i in range(len(n) - 12)
    )
if __name__ == "__main__":
print(F'''{solution() = }''')
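
# An equivalent formulation with math.prod, often clearer than reduce (a sketch, not the
# original solution):
#
#   from math import prod
#   def solution_prod(n: str = N) -> int:
#       return max(prod(int(d) for d in n[i : i + 13]) for i in range(len(n) - 12))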
| 24 |
"""simple docstring"""
from typing import List, Optional, TypeVar
from .arrow_dataset import Dataset, _concatenate_map_style_datasets, _interleave_map_style_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .info import DatasetInfo
from .iterable_dataset import IterableDataset, _concatenate_iterable_datasets, _interleave_iterable_datasets
from .splits import NamedSplit
from .utils import logging
from .utils.py_utils import Literal
logger = logging.get_logger(__name__)

DatasetType = TypeVar("DatasetType", Dataset, IterableDataset)


def interleave_datasets(
    datasets: List[DatasetType],
    probabilities: Optional[List[float]] = None,
    seed: Optional[int] = None,
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    stopping_strategy: Literal["first_exhausted", "all_exhausted"] = "first_exhausted",
) -> DatasetType:
    from .arrow_dataset import Dataset
    from .iterable_dataset import IterableDataset

    if not datasets:
        raise ValueError("Unable to interleave an empty list of datasets.")
    for i, dataset in enumerate(datasets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if stopping_strategy not in ["first_exhausted", "all_exhausted"]:
        raise ValueError(f"{stopping_strategy} is not supported. Please enter a valid stopping_strategy.")
    if dataset_type is Dataset:
        return _interleave_map_style_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )
    else:
        return _interleave_iterable_datasets(
            datasets, probabilities, seed, info=info, split=split, stopping_strategy=stopping_strategy
        )


def concatenate_datasets(
    dsets: List[DatasetType],
    info: Optional[DatasetInfo] = None,
    split: Optional[NamedSplit] = None,
    axis: int = 0,
) -> DatasetType:
    if not dsets:
        raise ValueError("Unable to concatenate an empty list of datasets.")
    for i, dataset in enumerate(dsets):
        if not isinstance(dataset, (Dataset, IterableDataset)):
            if isinstance(dataset, (DatasetDict, IterableDatasetDict)):
                if not dataset:
                    raise ValueError(
                        f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} "
                        "is an empty dataset dictionary."
                    )
                raise ValueError(
                    f"Dataset at position {i} has at least one split: {list(dataset)}\n"
                    f"Please pick one to interleave with the other datasets, for example: dataset['{next(iter(dataset))}']"
                )
            raise ValueError(
                f"Expected a list of Dataset objects or a list of IterableDataset objects, but element at position {i} is a {type(dataset).__name__}."
            )
        if i == 0:
            dataset_type, other_type = (
                (Dataset, IterableDataset) if isinstance(dataset, Dataset) else (IterableDataset, Dataset)
            )
        elif not isinstance(dataset, dataset_type):
            raise ValueError(
                f"Unable to interleave a {dataset_type.__name__} (at position 0) with a {other_type.__name__} (at position {i}). Expected a list of Dataset objects or a list of IterableDataset objects."
            )
    if dataset_type is Dataset:
        return _concatenate_map_style_datasets(dsets, info=info, split=split, axis=axis)
    else:
        return _concatenate_iterable_datasets(dsets, info=info, split=split, axis=axis)
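

# A short usage sketch of the public API above (values illustrative):
#
#   from datasets import Dataset, interleave_datasets
#
#   d1 = Dataset.from_dict({"a": [0, 1, 2]})
#   d2 = Dataset.from_dict({"a": [10, 11, 12]})
#   mixed = interleave_datasets(
#       [d1, d2], probabilities=[0.8, 0.2], seed=42, stopping_strategy="all_exhausted"
#   )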
| 24 | 1 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = generator('Something there' )
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
_UpperCAmelCase = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
| 24 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TextaTextGenerationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, require_tf, require_torch
from transformers.utils import is_torch_available
from .test_pipelines_common import ANY
if is_torch_available():
import torch
@is_pipeline_test
class lowercase__ ( unittest.TestCase ):
'''simple docstring'''
_UpperCAmelCase = MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
_UpperCAmelCase = TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
def lowerCamelCase_ ( self , snake_case , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = TextaTextGenerationPipeline(model=snake_case , tokenizer=snake_case )
return generator, ["Something to write", "Something else"]
def lowerCamelCase_ ( self , snake_case , snake_case ) -> Dict:
_UpperCAmelCase = generator('Something there' )
self.assertEqual(snake_case , [{'generated_text': ANY(snake_case )}] )
# These are encoder decoder, they don't just append to incoming string
self.assertFalse(outputs[0]['generated_text'].startswith('Something there' ) )
_UpperCAmelCase = generator(['This is great !', 'Something else'] , num_return_sequences=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
_UpperCAmelCase = generator(
['This is great !', 'Something else'] , num_return_sequences=2 , batch_size=2 , do_sample=snake_case )
self.assertEqual(
snake_case , [
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
[{'generated_text': ANY(snake_case )}, {'generated_text': ANY(snake_case )}],
] , )
with self.assertRaises(snake_case ):
generator(4 )
@require_torch
def lowerCamelCase_ ( self ) -> Dict:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='pt' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
_UpperCAmelCase = 3
_UpperCAmelCase = generator(
'Something there' , num_return_sequences=snake_case , num_beams=snake_case , )
_UpperCAmelCase = [
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': 'Beide Beide Beide Beide Beide Beide Beide Beide'},
{'generated_text': ''},
]
self.assertEqual(snake_case , snake_case )
_UpperCAmelCase = generator('This is a test' , do_sample=snake_case , num_return_sequences=2 , return_tensors=snake_case )
self.assertEqual(
snake_case , [
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
] , )
_UpperCAmelCase = generator.model.config.eos_token_id
_UpperCAmelCase = '<pad>'
_UpperCAmelCase = generator(
['This is a test', 'This is a second test'] , do_sample=snake_case , num_return_sequences=2 , batch_size=2 , return_tensors=snake_case , )
self.assertEqual(
snake_case , [
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
[
{'generated_token_ids': ANY(torch.Tensor )},
{'generated_token_ids': ANY(torch.Tensor )},
],
] , )
@require_tf
def lowerCamelCase_ ( self ) -> Any:
_UpperCAmelCase = pipeline('text2text-generation' , model='patrickvonplaten/t5-tiny-random' , framework='tf' )
# do_sample=False necessary for reproducibility
_UpperCAmelCase = generator('Something there' , do_sample=snake_case )
self.assertEqual(snake_case , [{'generated_text': ''}] )
| 24 | 1 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
# Register SEW's fairseq modules
from sew_asapp import tasks # noqa: F401
from transformers import (
    SEWConfig,
    SEWForCTC,
    SEWModel,
    Wav2Vec2CTCTokenizer,
    Wav2Vec2FeatureExtractor,
    Wav2Vec2Processor,
    logging,
)
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
    "post_extract_proj": "feature_projection",
    "encoder.pos_conv.0": "encoder.pos_conv_embed.conv",
    "self_attn.k_proj": "encoder.layers.*.attention.k_proj",
    "self_attn.v_proj": "encoder.layers.*.attention.v_proj",
    "self_attn.q_proj": "encoder.layers.*.attention.q_proj",
    "self_attn.out_proj": "encoder.layers.*.attention.out_proj",
    "self_attn_layer_norm": "encoder.layers.*.layer_norm",
    "fc1": "encoder.layers.*.feed_forward.intermediate_dense",
    "fc2": "encoder.layers.*.feed_forward.output_dense",
    "final_layer_norm": "encoder.layers.*.final_layer_norm",
    "encoder.upsample.0": "encoder.upsample.projection",
    "encoder.layer_norm": "encoder.layer_norm",
    "w2v_model.layer_norm": "layer_norm",
    "w2v_encoder.proj": "lm_head",
    "mask_emb": "masked_spec_embed",
}
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    # Walk the dotted key down to the target submodule/parameter.
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    assert hf_shape == value.shape, (
        f'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
        f" {value.shape} for {full_name}"
    )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.')
def recursively_load_weights(fairseq_model, hf_model, is_finetuned):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.sew.feature_extractor if is_finetuned else hf_model.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "sew." + mapped_key if (is_finetuned and mapped_key != "lm_head") else mapped_key

                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "bias" in name:
                        weight_type = "bias"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
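# Illustration of the wildcard mapping above (hypothetical key, not from the
# source checkpoint): a fairseq parameter named
# "encoder.layers.3.self_attn.k_proj.weight" matches MAPPING key
# "self_attn.k_proj"; the layer index "3" is recovered from the prefix, so the
# value lands on "sew.encoder.layers.3.attention.k_proj" with weight_type
# "weight".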
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
                f"{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was"
                " found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
                f"{full_name} has size {value.shape}, but"
                f" {feature_extractor[layer_id].layer_norm.weight.data.shape} was found."
            )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)
def convert_config(model, is_finetuned):
    config = SEWConfig()
    if is_finetuned:
        fs_config = model.w2v_encoder.w2v_model.cfg
    else:
        fs_config = model.cfg

    config.conv_bias = fs_config.conv_bias
    conv_layers = eval(fs_config.conv_feature_layers)
    config.conv_dim = [x[0] for x in conv_layers]
    config.conv_kernel = [x[1] for x in conv_layers]
    config.conv_stride = [x[2] for x in conv_layers]
    config.feat_extract_activation = "gelu"
    config.feat_extract_norm = "layer" if fs_config.extractor_mode == "layer_norm" else "group"
    config.final_dropout = 0.0
    config.hidden_act = fs_config.activation_fn.name
    config.hidden_size = fs_config.encoder_embed_dim
    config.initializer_range = 0.02
    config.intermediate_size = fs_config.encoder_ffn_embed_dim
    config.layer_norm_eps = 1e-5
    config.layerdrop = fs_config.encoder_layerdrop
    config.num_attention_heads = fs_config.encoder_attention_heads
    config.num_conv_pos_embedding_groups = fs_config.conv_pos_groups
    config.num_conv_pos_embeddings = fs_config.conv_pos
    config.num_feat_extract_layers = len(conv_layers)
    config.num_hidden_layers = fs_config.encoder_layers
    config.squeeze_factor = fs_config.squeeze_factor

    # take care of any params that are overridden by the Wav2VecCtc model
    if is_finetuned:
        fs_config = model.cfg
        config.final_dropout = fs_config.final_dropout
        config.layerdrop = fs_config.layerdrop
    config.activation_dropout = fs_config.activation_dropout
    config.apply_spec_augment = fs_config.mask_prob > 0 or fs_config.mask_channel_prob > 0
    config.attention_dropout = fs_config.attention_dropout
    config.feat_proj_dropout = fs_config.dropout_input
    config.hidden_dropout = fs_config.dropout
    config.mask_feature_length = fs_config.mask_channel_length
    config.mask_feature_prob = fs_config.mask_channel_prob
    config.mask_time_length = fs_config.mask_length
    config.mask_time_prob = fs_config.mask_prob

    config.feature_extractor_type = "Wav2Vec2FeatureExtractor"
    config.tokenizer_class = "Wav2Vec2CTCTokenizer"

    return config
@torch.no_grad()
def convert_sew_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    """Copy/paste/tweak the fairseq SEW weights into the transformers design."""
    if is_finetuned:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
            [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
        )
    else:
        model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path])

    if config_path is not None:
        config = SEWConfig.from_pretrained(config_path)
    else:
        config = convert_config(model[0], is_finetuned)
    model = model[0].eval()

    return_attention_mask = True if config.feat_extract_norm == "layer" else False
    feature_extractor = Wav2Vec2FeatureExtractor(
        feature_size=1,
        sampling_rate=16000,
        padding_value=0,
        do_normalize=True,
        return_attention_mask=return_attention_mask,
    )

    if is_finetuned:
        if dict_path:
            target_dict = Dictionary.load(dict_path)

            # important change bos & pad token id since CTC symbol is <pad> and
            # not <s> as in fairseq
            target_dict.indices[target_dict.bos_word] = target_dict.pad_index
            target_dict.indices[target_dict.pad_word] = target_dict.bos_index
            config.bos_token_id = target_dict.pad_index
            config.pad_token_id = target_dict.bos_index
            config.eos_token_id = target_dict.eos_index
            config.vocab_size = len(target_dict.symbols)
            vocab_path = os.path.join(pytorch_dump_folder_path, "vocab.json")
            if not os.path.isdir(pytorch_dump_folder_path):
                logger.error("--pytorch_dump_folder_path ({}) should be a directory".format(pytorch_dump_folder_path))
                return
            os.makedirs(pytorch_dump_folder_path, exist_ok=True)
            with open(vocab_path, "w", encoding="utf-8") as vocab_handle:
                json.dump(target_dict.indices, vocab_handle)
            tokenizer = Wav2Vec2CTCTokenizer(
                vocab_path,
                unk_token=target_dict.unk_word,
                pad_token=target_dict.pad_word,
                bos_token=target_dict.bos_word,
                eos_token=target_dict.eos_word,
                word_delimiter_token="|",
                do_lower_case=False,
            )
            processor = Wav2Vec2Processor(feature_extractor=feature_extractor, tokenizer=tokenizer)
            processor.save_pretrained(pytorch_dump_folder_path)

        hf_model = SEWForCTC(config)
    else:
        hf_model = SEWModel(config)
        feature_extractor.save_pretrained(pytorch_dump_folder_path)

    recursively_load_weights(model, hf_model, is_finetuned)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--pytorch_dump_folder_path", default=None, type=str, help="Path to the output PyTorch model.")
    parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--dict_path", default=None, type=str, help="Path to dict of fine-tuned model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--is_finetuned", action="store_true", help="Whether the model to convert is a fine-tuned model or not"
    )
    args = parser.parse_args()
    convert_sew_checkpoint(
        args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, args.is_finetuned
    )
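    # Hypothetical invocation sketch (script name and paths are placeholders,
    # not real files):
    #
    #     python convert_sew_checkpoint.py \
    #         --checkpoint_path ./sew_small.pt \
    #         --pytorch_dump_folder_path ./sew-small-hf \
    #         --dict_path ./dict.ltr.txt \
    #         --is_finetuned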
| 24 |
"""simple docstring"""
def UpperCAmelCase ( A : int ):
'''simple docstring'''
_UpperCAmelCase = [[0 for _ in range(A )] for _ in range(m + 1 )]
for i in range(m + 1 ):
_UpperCAmelCase = 1
for n in range(m + 1 ):
for k in range(1 , A ):
memo[n][k] += memo[n][k - 1]
if n - k > 0:
memo[n][k] += memo[n - k - 1][k]
return memo[m][m - 1]
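# Worked example (traced by hand): partition(5) returns 7, the number of
# integer partitions of 5: 5, 4+1, 3+2, 3+1+1, 2+2+1, 2+1+1+1, 1+1+1+1+1.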
if __name__ == "__main__":
    import sys

    if len(sys.argv) == 1:
        try:
            n = int(input("Enter a number: ").strip())
            print(partition(n))
        except ValueError:
            print("Please enter a number.")
    else:
        try:
            n = int(sys.argv[1])
            print(partition(n))
        except ValueError:
            print("Please pass a number.")
| 24 | 1 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class FunnelTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = FunnelTokenizer
    rust_tokenizer_class = FunnelTokenizerFast
    test_rust_tokenizer = True
    space_between_special_tokens = True
    def setUp(self):
        super().setUp()

        vocab_tokens = [
            "<unk>",
            "<cls>",
            "<sep>",
            "want",
            "##want",
            "##ed",
            "wa",
            "un",
            "runn",
            "##ing",
            ",",
            "low",
            "lowest",
        ]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
    def get_tokenizer(self, **kwargs):
        return FunnelTokenizer.from_pretrained(self.tmpdirname, **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return FunnelTokenizerFast.from_pretrained(self.tmpdirname, **kwargs)

    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)

        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [7, 4, 5, 10, 8, 9])

    def test_token_type_ids(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            inputs = tokenizer("UNwant\u00E9d,running")
            sentence_len = len(inputs["input_ids"]) - 1
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len)

            inputs = tokenizer("UNwant\u00E9d,running", "UNwant\u00E9d,running")
            self.assertListEqual(inputs["token_type_ids"], [2] + [0] * sentence_len + [1] * sentence_len)
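# Sketch of the convention verified above (the checkpoint name is an
# assumption, not taken from this file): unlike BERT's plain 0/1 segment ids,
# Funnel assigns token_type_id 2 to the leading <cls> token.
#
#     tokenizer = FunnelTokenizer.from_pretrained("funnel-transformer/small")
#     tokenizer("hello", "world")["token_type_ids"]  # -> [2, 0, ..., 1, ...]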
| 24 |
"""simple docstring"""
import os
lowercase = {'''I''': 1, '''V''': 5, '''X''': 10, '''L''': 50, '''C''': 1_00, '''D''': 5_00, '''M''': 10_00}
def parse_roman_numerals(numerals: str) -> int:
    """Convert a Roman numeral (possibly non-minimal) to its integer value."""
    total_value = 0

    index = 0
    while index < len(numerals) - 1:
        current_value = SYMBOLS[numerals[index]]
        next_value = SYMBOLS[numerals[index + 1]]
        if current_value < next_value:
            total_value -= current_value
        else:
            total_value += current_value
        index += 1
    total_value += SYMBOLS[numerals[index]]

    return total_value
def generate_roman_numerals(num: int) -> str:
    """Generate the minimal (subtractive-form) Roman numeral for num."""
    numerals = ""

    m_count = num // 1000
    numerals += m_count * "M"
    num %= 1000

    c_count = num // 100
    if c_count == 9:
        numerals += "CM"
        c_count -= 9
    elif c_count == 4:
        numerals += "CD"
        c_count -= 4
    if c_count >= 5:
        numerals += "D"
        c_count -= 5
    numerals += c_count * "C"
    num %= 100

    x_count = num // 10
    if x_count == 9:
        numerals += "XC"
        x_count -= 9
    elif x_count == 4:
        numerals += "XL"
        x_count -= 4
    if x_count >= 5:
        numerals += "L"
        x_count -= 5
    numerals += x_count * "X"
    num %= 10

    if num == 9:
        numerals += "IX"
        num -= 9
    elif num == 4:
        numerals += "IV"
        num -= 4
    if num >= 5:
        numerals += "V"
        num -= 5
    numerals += num * "I"

    return numerals
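# Round-trip example (checked by hand): parse_roman_numerals("MCMXC") == 1990
# and generate_roman_numerals(1990) == "MCMXC". For a non-minimal input such as
# "IIII", parsing gives 4 and regeneration returns "IV", a saving of two
# characters, which is what solution() below accumulates per line.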
def solution(roman_numerals_filename: str = "/p089_roman.txt") -> int:
    """Return the total number of characters saved by minimising each numeral."""
    savings = 0

    with open(os.path.dirname(__file__) + roman_numerals_filename) as filea:
        lines = filea.readlines()

    for line in lines:
        original = line.strip()
        num = parse_roman_numerals(original)
        shorter = generate_roman_numerals(num)
        savings += len(original) - len(shorter)

    return savings


if __name__ == "__main__":
    print(f"{solution() = }")
| 24 | 1 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
lowercase = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class DummyDataset(Dataset):
    def __init__(self, length: int = 101):
        self.length = length

    def __len__(self):
        return self.length

    def __getitem__(self, i):
        return i
class DummyDataCollator:
    def __call__(self, features):
        return {"input_ids": torch.tensor(features), "labels": torch.tensor(features)}
class DummyModel(nn.Module):
    def __init__(self):
        super().__init__()
        # Add some (unused) params otherwise DDP will complain.
        self.fc = nn.Linear(120, 80)

    def forward(self, input_ids, labels=None):
        if labels is not None:
            return torch.tensor(0.0, device=input_ids.device), input_ids
        else:
            return input_ids
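# Descriptive note (added, not from the source): DummyModel echoes input_ids
# back as its "logits" and reports a constant zero loss, so the distributed
# checks below exercise sample ordering and gathering rather than model quality.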
class TestTrainerDistributedNeuronCore(TestCasePlus):
    @require_torch_neuroncore
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node=2
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
class TestTrainerDistributed(TestCasePlus):
    @require_torch_multi_gpu
    def test_trainer(self):
        distributed_args = f"""--nproc_per_node={torch.cuda.device_count()}
            --master_port={get_torch_dist_unique_port()}
            {self.test_file_dir}/test_trainer_distributed.py
        """.split()
        output_dir = self.get_auto_remove_tmp_dir()
        args = f"--output_dir {output_dir}".split()
        cmd = ["torchrun"] + distributed_args + args
        execute_subprocess_async(cmd, env=self.get_env())
        # successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
    # The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
    #
    # PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py

    parser = HfArgumentParser((TrainingArguments,))
    training_args = parser.parse_args_into_dataclasses()[0]

    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
    )

    # Essentially, what we want to verify in the distributed case is that we get all samples back,
    # in the right order. (this is crucial for prediction for instance)
    for dataset_length in [101, 40, 7]:
        dataset = DummyDataset(dataset_length)

        def compute_metrics(p: EvalPrediction) -> Dict:
            sequential = list(range(len(dataset)))
            success = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
            if not success and training_args.local_rank == 0:
                logger.warning(
                    "Predictions and/or labels do not match expected results:\n  - predictions: "
                    f"{p.predictions.tolist()}\n  - labels: {p.label_ids.tolist()}\n  - expected: {sequential}"
                )
            return {"success": success}

        trainer = Trainer(
            model=DummyModel(),
            args=training_args,
            data_collator=DummyDataCollator(),
            eval_dataset=dataset,
            compute_metrics=compute_metrics,
        )
        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = 2

        metrics = trainer.evaluate()
        logger.info(metrics)
        if metrics["eval_success"] is not True:
            logger.error(metrics)
            exit(1)

        p = trainer.predict(dataset)
        logger.info(p.metrics)
        if p.metrics["test_success"] is not True:
            logger.error(p.metrics)
            exit(1)

        trainer.args.eval_accumulation_steps = None
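    # Invariant being verified (summary, added): with a DistributedSampler,
    # Trainer.evaluate/predict must gather per-rank shards back into the
    # original dataset order, so predictions == [0, 1, ..., len(dataset) - 1]
    # both for lengths that divide evenly across ranks and for those that do
    # not, with or without eval_accumulation_steps.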
| 24 |
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_flax, require_tf, require_torch
from transformers.utils import (
expand_dims,
flatten_dict,
is_flax_available,
is_tf_available,
is_torch_available,
reshape,
squeeze,
transpose,
)
if is_flax_available():
import jax.numpy as jnp
if is_tf_available():
import tensorflow as tf
if is_torch_available():
import torch
class GenericTester(unittest.TestCase):
    def test_flatten_dict(self):
        input_dict = {
            "task_specific_params": {
                "summarization": {"length_penalty": 1.0, "max_length": 128, "min_length": 12, "num_beams": 4},
                "summarization_cnn": {"length_penalty": 2.0, "max_length": 142, "min_length": 56, "num_beams": 4},
                "summarization_xsum": {"length_penalty": 1.0, "max_length": 62, "min_length": 11, "num_beams": 6},
            }
        }
        expected_dict = {
            "task_specific_params.summarization.length_penalty": 1.0,
            "task_specific_params.summarization.max_length": 128,
            "task_specific_params.summarization.min_length": 12,
            "task_specific_params.summarization.num_beams": 4,
            "task_specific_params.summarization_cnn.length_penalty": 2.0,
            "task_specific_params.summarization_cnn.max_length": 142,
            "task_specific_params.summarization_cnn.min_length": 56,
            "task_specific_params.summarization_cnn.num_beams": 4,
            "task_specific_params.summarization_xsum.length_penalty": 1.0,
            "task_specific_params.summarization_xsum.max_length": 62,
            "task_specific_params.summarization_xsum.min_length": 11,
            "task_specific_params.summarization_xsum.num_beams": 6,
        }

        self.assertEqual(flatten_dict(input_dict), expected_dict)
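    # Quick illustration of the behavior under test: flatten_dict joins nested
    # keys with ".", e.g. flatten_dict({"a": {"b": 1}}) == {"a.b": 1}.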
    def test_transpose_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(transpose(x), x.transpose()))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), x.transpose((1, 2, 0))))

    @require_torch
    def test_transpose_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_tf
    def test_transpose_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x), transpose(t).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), transpose(t, axes=(1, 2, 0)).numpy()))

    @require_flax
    def test_transpose_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x), np.asarray(transpose(t))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(transpose(x, axes=(1, 2, 0)), np.asarray(transpose(t, axes=(1, 2, 0)))))
    def test_reshape_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.reshape(x, (4, 3))))

        x = np.random.randn(3, 4, 5)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.reshape(x, (12, 5))))

    @require_torch
    def test_reshape_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_tf
    def test_reshape_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), reshape(t, (4, 3)).numpy()))

        x = np.random.randn(3, 4, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), reshape(t, (12, 5)).numpy()))

    @require_flax
    def test_reshape_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (4, 3)), np.asarray(reshape(t, (4, 3)))))

        x = np.random.randn(3, 4, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(reshape(x, (12, 5)), np.asarray(reshape(t, (12, 5)))))
    def test_squeeze_numpy(self):
        x = np.random.randn(1, 3, 4)
        self.assertTrue(np.allclose(squeeze(x), np.squeeze(x)))

        x = np.random.randn(1, 4, 1, 5)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.squeeze(x, axis=2)))

    @require_torch
    def test_squeeze_torch(self):
        x = np.random.randn(1, 3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_tf
    def test_squeeze_tf(self):
        x = np.random.randn(1, 3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x), squeeze(t).numpy()))

        x = np.random.randn(1, 4, 1, 5)
        t = tf.constant(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), squeeze(t, axis=2).numpy()))

    @require_flax
    def test_squeeze_flax(self):
        x = np.random.randn(1, 3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x), np.asarray(squeeze(t))))

        x = np.random.randn(1, 4, 1, 5)
        t = jnp.array(x)
        self.assertTrue(np.allclose(squeeze(x, axis=2), np.asarray(squeeze(t, axis=2))))
    def test_expand_dims_numpy(self):
        x = np.random.randn(3, 4)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.expand_dims(x, axis=1)))

    @require_torch
    def test_expand_dims_torch(self):
        x = np.random.randn(3, 4)
        t = torch.tensor(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_tf
    def test_expand_dims_tf(self):
        x = np.random.randn(3, 4)
        t = tf.constant(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), expand_dims(t, axis=1).numpy()))

    @require_flax
    def test_expand_dims_flax(self):
        x = np.random.randn(3, 4)
        t = jnp.array(x)
        self.assertTrue(np.allclose(expand_dims(x, axis=1), np.asarray(expand_dims(t, axis=1))))
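# Standalone sketch of the dispatch behavior these tests cover (illustrative):
#
#     import numpy as np
#     from transformers.utils import transpose
#
#     x = np.random.randn(3, 4)
#     transpose(x).shape  # (4, 3) -- numpy in, numpy out; torch/tf/jax inputs
#                         # are handled by the matching framework op.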
| 24 | 1 |