| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
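        # Illustrative note (not part of the original test): with pad_token_id = 0
        # as above, clamp(1) lifts any randomly drawn 0s up to 1, so the generated
        # sequences can never contain the pad token mid-sequence.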
        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
            vocab_size=166,  # t5 forces 100 extra tokens
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )

    def get_config(self):
        return T5Config(
            vocab_size=self.vocab_size,
            d_model=self.hidden_size,
            d_ff=self.d_ff,
            d_kv=self.hidden_size // self.num_attention_heads,
            num_layers=self.num_hidden_layers,
            num_decoder_layers=self.decoder_layers,
            num_heads=self.num_attention_heads,
            relative_attention_num_buckets=self.relative_attention_num_buckets,
            dropout_rate=self.dropout_rate,
            initializer_factor=self.initializer_factor,
            eos_token_id=self.eos_token_id,
            bos_token_id=self.pad_token_id,
            pad_token_id=self.pad_token_id,
            decoder_start_token_id=self.decoder_start_token_id,
        )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extend to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]

    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip("Test has a segmentation fault on torch 1.8.0" )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Tuple = UMTaModel(config_and_inputs[0] ).to(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__A , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f'''{tmpdirname}/t5_test.onnx''' , export_params=__A , opset_version=9 , input_names=["input_ids", "decoder_input_ids"] , )
@unittest.skipIf(torch_device == "cpu" , "Cant do half precision" )
def _lowercase ( self : Optional[int] ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__A )
    def test_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("Does not work on the tiny model as we keep hitting edge cases." )
def _lowercase ( self : List[str] ):
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
    @slow
    @unittest.skip(
        "Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged"
    )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"Bonjour monsieur <extra_id_0> bien <extra_id_1>.",
"No se como puedo <extra_id_0>.",
"This is the reason why we <extra_id_0> them.",
"The <extra_id_0> walks in <extra_id_1>, seats",
"A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.",
]
        input_ids = tokenizer(input_text, return_tensors="pt", padding=True).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
            [
                [ 38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
            ]
        )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ ๐ <extra_id_56>ajลกietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajลกie</s>",
"<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> ํผํด[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
"<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join

# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
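# Usage sketch (an assumption based on the shared HF testing utilities, not part
# of this conftest): the option registered by `pytest_addoption_shared` is
# typically enabled on the command line, e.g.
#   python -m pytest tests -n auto --make-reports=my_run
# which makes `pytest_terminal_summary_main` write per-category report files
# for that run.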
import gc
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTextModelWithProjection, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DiffusionPipeline,
EulerDiscreteScheduler,
    StableDiffusionXLImg2ImgPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import floats_tensor, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class StableDiffusionXLImg2ImgPipelineFastTests(PipelineLatentTesterMixin, PipelineTesterMixin, unittest.TestCase):
    pipeline_class = StableDiffusionXLImg2ImgPipeline
    params = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width"}
    required_optional_params = PipelineTesterMixin.required_optional_params - {"latents"}
    batch_params = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS
    image_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    image_latents_params = IMAGE_TO_IMAGE_IMAGE_PARAMS
    def get_dummy_components(self):
        torch.manual_seed(0)
        unet = UNet2DConditionModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=4, out_channels=4, down_block_types=("DownBlock2D", "CrossAttnDownBlock2D"), up_block_types=("CrossAttnUpBlock2D", "UpBlock2D"), attention_head_dim=(2, 4), use_linear_projection=True, addition_embed_type="text_time", addition_time_embed_dim=8, transformer_layers_per_block=(1, 2), projection_class_embeddings_input_dim=80, cross_attention_dim=64,
        )
        scheduler = EulerDiscreteScheduler(
            beta_start=0.00085, beta_end=0.012, steps_offset=1, beta_schedule="scaled_linear", timestep_spacing="leading",
        )
        torch.manual_seed(0)
        vae = AutoencoderKL(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=4, sample_size=128,
        )
        torch.manual_seed(0)
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000, hidden_act="gelu", projection_dim=32,
        )
        text_encoder = CLIPTextModel(text_encoder_config)
        tokenizer = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        text_encoder_2 = CLIPTextModelWithProjection(text_encoder_config)
        tokenizer_2 = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip", local_files_only=True)
        components = {
            "unet": unet,
            "scheduler": scheduler,
            "vae": vae,
            "text_encoder": text_encoder,
            "tokenizer": tokenizer,
            "text_encoder_2": text_encoder_2,
            "tokenizer_2": tokenizer_2,
            # "safety_checker": None,
            # "feature_extractor": None,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        image = floats_tensor((1, 3, 32, 32), rng=random.Random(seed)).to(device)
        image = image / 2 + 0.5
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 2,
            "guidance_scale": 5.0,
            "output_type": "numpy",
            "strength": 0.75,
        }
        return inputs
    def test_stable_diffusion_xl_img2img_euler(self):
        device = "cpu"  # ensure determinism for the device-dependent torch.Generator
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(device)
        sd_pipe.set_progress_bar_config(disable=None)

        inputs = self.get_dummy_inputs(device)
        image = sd_pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 32, 32, 3)

        expected_slice = np.array([0.4656, 0.4840, 0.4439, 0.6698, 0.5574, 0.4524, 0.5799, 0.5943, 0.5165])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_attention_slicing_forward_pass(self):
        super().test_attention_slicing_forward_pass(expected_max_diff=3e-3)

    def test_inference_batch_single_identical(self):
        super().test_inference_batch_single_identical(expected_max_diff=3e-3)

    def test_save_load_optional_components(self):
        pass
    def test_stable_diffusion_xl_img2img_negative_prompt_embeds(self):
        components = self.get_dummy_components()
        sd_pipe = StableDiffusionXLImg2ImgPipeline(**components)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe = sd_pipe.to(torch_device)
        sd_pipe.set_progress_bar_config(disable=None)

        # forward without prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        inputs["negative_prompt"] = negative_prompt
        inputs["prompt"] = 3 * [inputs["prompt"]]

        output = sd_pipe(**inputs)
        image_slice_1 = output.images[0, -3:, -3:, -1]

        # forward with prompt embeds
        inputs = self.get_dummy_inputs(torch_device)
        negative_prompt = 3 * ["this is a negative prompt"]
        prompt = 3 * [inputs.pop("prompt")]

        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = sd_pipe.encode_prompt(prompt, negative_prompt=negative_prompt)

        output = sd_pipe(
            **inputs,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            pooled_prompt_embeds=pooled_prompt_embeds,
            negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
        )
        image_slice_2 = output.images[0, -3:, -3:, -1]

        # make sure that it's equal
        assert np.abs(image_slice_1.flatten() - image_slice_2.flatten()).max() < 1e-4
@slow
@require_torch_gpu
class StableDiffusionXLImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_inputs(self, device, generator_device="cpu", dtype=torch.float32, seed=0):
        generator = torch.Generator(device=generator_device).manual_seed(seed)
        latents = np.random.RandomState(seed).standard_normal((1, 4, 64, 64))
        latents = torch.from_numpy(latents).to(device=device, dtype=dtype)
        inputs = {
            "prompt": "a photograph of an astronaut riding a horse",
            "latents": latents,
            "generator": generator,
            "num_inference_steps": 3,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs

    def test_stable_diffusion(self):
        pipe = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-base")
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        inputs = self.get_inputs(torch_device)
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()

        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.49493, 0.47896, 0.40798, 0.54214, 0.53212, 0.48202, 0.47656, 0.46329, 0.48506])
        assert np.abs(image_slice - expected_slice).max() < 7e-3
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
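# Worked example (values checked by hand against the KMP prefix-function
# definition; the function names above are restored guesses for the obfuscated
# originals):
#   prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc") -> 4   # the border "aabc" is both a proper prefix and a suffix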
if __name__ == "__main__":
import doctest
doctest.testmod()
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
"""configuration_clap""": [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapAudioConfig""",
"""ClapConfig""",
"""ClapTextConfig""",
],
"""processing_clap""": ["""ClapProcessor"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clap"] = [
"""CLAP_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ClapModel""",
"""ClapPreTrainedModel""",
"""ClapTextModel""",
"""ClapTextModelWithProjection""",
"""ClapAudioModel""",
"""ClapAudioModelWithProjection""",
]
    _import_structure["feature_extraction_clap"] = ["ClapFeatureExtractor"]
if TYPE_CHECKING:
from .configuration_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioConfig,
ClapConfig,
ClapTextConfig,
)
from .processing_clap import ClapProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clap import ClapFeatureExtractor
from .modeling_clap import (
CLAP_PRETRAINED_MODEL_ARCHIVE_LIST,
ClapAudioModel,
ClapAudioModelWithProjection,
ClapModel,
ClapPreTrainedModel,
ClapTextModel,
ClapTextModelWithProjection,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
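# Note (added for clarity; a sketch of the standard transformers lazy-init
# pattern): `_LazyModule` defers the heavy imports declared in
# `_import_structure` until an attribute is first accessed, so
# `from transformers import ClapModel` only pays the torch import cost at that
# point.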
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)
    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
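# Usage sketch (an assumption about the surrounding `datasets` API, not part of
# this file): `dataset.set_format("jax", device=str(jax.devices()[0]))` routes
# row/column/batch extraction through this formatter, returning jnp arrays
# placed on the requested device via `jax.default_device`.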
from dataclasses import dataclass
from typing import Dict, Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .attention_processor import AttentionProcessor, AttnProcessor
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, DiagonalGaussianDistribution, Encoder
@dataclass
class AutoencoderKLOutput(BaseOutput):
    latent_dist: "DiagonalGaussianDistribution"


class AutoencoderKL(ModelMixin, ConfigMixin):
    _supports_gradient_checkpointing = True

    @register_to_config
    def __init__(
        self,
        in_channels: int = 3,
        out_channels: int = 3,
        down_block_types: Tuple[str] = ("DownEncoderBlock2D",),
        up_block_types: Tuple[str] = ("UpDecoderBlock2D",),
        block_out_channels: Tuple[int] = (64,),
        layers_per_block: int = 1,
        act_fn: str = "silu",
        latent_channels: int = 4,
        norm_num_groups: int = 32,
        sample_size: int = 32,
        scaling_factor: float = 0.18215,
    ):
        super().__init__()

        # pass init params to Encoder
        self.encoder = Encoder(
            in_channels=in_channels, out_channels=latent_channels, down_block_types=down_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, act_fn=act_fn, norm_num_groups=norm_num_groups, double_z=True,
        )

        # pass init params to Decoder
        self.decoder = Decoder(
            in_channels=latent_channels, out_channels=out_channels, up_block_types=up_block_types, block_out_channels=block_out_channels, layers_per_block=layers_per_block, norm_num_groups=norm_num_groups, act_fn=act_fn,
        )

        self.quant_conv = nn.Conv2d(2 * latent_channels, 2 * latent_channels, 1)
        self.post_quant_conv = nn.Conv2d(latent_channels, latent_channels, 1)

        self.use_slicing = False
        self.use_tiling = False

        # only relevant if vae tiling is enabled
        self.tile_sample_min_size = self.config.sample_size
        sample_size = (
            self.config.sample_size[0]
            if isinstance(self.config.sample_size, (list, tuple))
            else self.config.sample_size
        )
        self.tile_latent_min_size = int(sample_size / (2 ** (len(self.config.block_out_channels) - 1)))
        self.tile_overlap_factor = 0.25
    def _set_gradient_checkpointing(self, module, value=False):
        if isinstance(module, (Encoder, Decoder)):
            module.gradient_checkpointing = value

    def enable_tiling(self, use_tiling: bool = True):
        self.use_tiling = use_tiling

    def disable_tiling(self):
        self.enable_tiling(False)

    def enable_slicing(self):
        self.use_slicing = True

    def disable_slicing(self):
        self.use_slicing = False
    @property
    # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "set_processor"):
                processors[f"{name}.processor"] = module.processor
            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)
        return processors
    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        count = len(self.attn_processors.keys())
        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))
            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

    def set_default_attn_processor(self):
        self.set_attn_processor(AttnProcessor())
    @apply_forward_hook
    def encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        if self.use_tiling and (x.shape[-1] > self.tile_sample_min_size or x.shape[-2] > self.tile_sample_min_size):
            return self.tiled_encode(x, return_dict=return_dict)

        if self.use_slicing and x.shape[0] > 1:
            encoded_slices = [self.encoder(x_slice) for x_slice in x.split(1)]
            h = torch.cat(encoded_slices)
        else:
            h = self.encoder(x)

        moments = self.quant_conv(h)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def _decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_tiling and (z.shape[-1] > self.tile_latent_min_size or z.shape[-2] > self.tile_latent_min_size):
            return self.tiled_decode(z, return_dict=return_dict)

        z = self.post_quant_conv(z)
        dec = self.decoder(z)

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    @apply_forward_hook
    def decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        if self.use_slicing and z.shape[0] > 1:
            decoded_slices = [self._decode(z_slice).sample for z_slice in z.split(1)]
            decoded = torch.cat(decoded_slices)
        else:
            decoded = self._decode(z).sample

        if not return_dict:
            return (decoded,)

        return DecoderOutput(sample=decoded)
    def blend_v(self, a, b, blend_extent):
        blend_extent = min(a.shape[2], b.shape[2], blend_extent)
        for y in range(blend_extent):
            b[:, :, y, :] = a[:, :, -blend_extent + y, :] * (1 - y / blend_extent) + b[:, :, y, :] * (y / blend_extent)
        return b

    def blend_h(self, a, b, blend_extent):
        blend_extent = min(a.shape[3], b.shape[3], blend_extent)
        for x in range(blend_extent):
            b[:, :, :, x] = a[:, :, :, -blend_extent + x] * (1 - x / blend_extent) + b[:, :, :, x] * (x / blend_extent)
        return b
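    # Illustrative note on the blend above (not part of the original source):
    # with blend_extent = 4, the seam rows (or columns) of tile `b` are mixed
    # with the matching edge of tile `a` at weights 0.00/0.25/0.50/0.75, i.e. a
    # linear cross-fade that hides the tile boundary in tiled encode/decode.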
    def tiled_encode(self, x: torch.FloatTensor, return_dict: bool = True) -> AutoencoderKLOutput:
        overlap_size = int(self.tile_sample_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_latent_min_size * self.tile_overlap_factor)
        row_limit = self.tile_latent_min_size - blend_extent

        # Split the image into 512x512 tiles and encode them separately.
        rows = []
        for i in range(0, x.shape[2], overlap_size):
            row = []
            for j in range(0, x.shape[3], overlap_size):
                tile = x[:, :, i : i + self.tile_sample_min_size, j : j + self.tile_sample_min_size]
                tile = self.encoder(tile)
                tile = self.quant_conv(tile)
                row.append(tile)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        moments = torch.cat(result_rows, dim=2)
        posterior = DiagonalGaussianDistribution(moments)

        if not return_dict:
            return (posterior,)

        return AutoencoderKLOutput(latent_dist=posterior)

    def tiled_decode(self, z: torch.FloatTensor, return_dict: bool = True) -> Union[DecoderOutput, torch.FloatTensor]:
        overlap_size = int(self.tile_latent_min_size * (1 - self.tile_overlap_factor))
        blend_extent = int(self.tile_sample_min_size * self.tile_overlap_factor)
        row_limit = self.tile_sample_min_size - blend_extent

        # Split z into overlapping 64x64 tiles and decode them separately.
        # The tiles have an overlap to avoid seams between tiles.
        rows = []
        for i in range(0, z.shape[2], overlap_size):
            row = []
            for j in range(0, z.shape[3], overlap_size):
                tile = z[:, :, i : i + self.tile_latent_min_size, j : j + self.tile_latent_min_size]
                tile = self.post_quant_conv(tile)
                decoded = self.decoder(tile)
                row.append(decoded)
            rows.append(row)
        result_rows = []
        for i, row in enumerate(rows):
            result_row = []
            for j, tile in enumerate(row):
                # blend the above tile and the left tile
                # to the current tile and add the current tile to the result row
                if i > 0:
                    tile = self.blend_v(rows[i - 1][j], tile, blend_extent)
                if j > 0:
                    tile = self.blend_h(row[j - 1], tile, blend_extent)
                result_row.append(tile[:, :, :row_limit, :row_limit])
            result_rows.append(torch.cat(result_row, dim=3))

        dec = torch.cat(result_rows, dim=2)
        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)

    def forward(
        self,
        sample: torch.FloatTensor,
        sample_posterior: bool = False,
        return_dict: bool = True,
        generator: Optional[torch.Generator] = None,
    ) -> Union[DecoderOutput, torch.FloatTensor]:
        x = sample
        posterior = self.encode(x).latent_dist
        if sample_posterior:
            z = posterior.sample(generator=generator)
        else:
            z = posterior.mode()
        dec = self.decode(z).sample

        if not return_dict:
            return (dec,)

        return DecoderOutput(sample=dec)
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k


def _set(k, v):
    return setitem, k, v


def _del(k):
    return delitem, k


def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
)
_overwrite_items = [
_set("""key_a""", """val_a"""),
_set("""key_a""", """val_b"""),
]
_delete_items = [
_set("""key_a""", """val_a"""),
_set("""key_b""", """val_b"""),
_del("""key_a"""),
_del("""key_b"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
]
_access_absent_items = [
_get("""key_a"""),
_del("""key_a"""),
_set("""key_a""", """val_a"""),
_del("""key_a"""),
_del("""key_a"""),
_get("""key_a"""),
]
_add_with_resize_up = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
]
_add_with_resize_down = [
*[_set(x, x) for x in range(5)], # guaranteed upsize
*[_del(x) for x in range(5)],
_set("""key_a""", """val_b"""),
]
@pytest.mark.parametrize(
"operations" , (
pytest.param(_add_items , id="add items" ),
pytest.param(_overwrite_items , id="overwrite items" ),
pytest.param(_delete_items , id="delete items" ),
pytest.param(_access_absent_items , id="access absent items" ),
pytest.param(_add_with_resize_up , id="add with resize up" ),
pytest.param(_add_with_resize_down , id="add with resize down" ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods_was_added_to_api():
    def is_public(name: str) -> bool:
        return not name.startswith("_")

    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}

    assert dict_public_names > hash_public_names
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"

    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
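# Usage sketch (hypothetical column names; illustrates how `align_with_features`
# rebinds the label schema to a dataset's concrete ClassLabel):
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)  # label_schema now carries the concrete ClassLabel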
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)

MAPPING = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
TOP_LEVEL_KEYS = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
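# Illustrative walk-through of the "*" wildcard resolution below (the layer
# index "3" is a made-up example): a fairseq key such as
#   "encoder.layers.3.self_attn.k_proj.weight"
# matches the MAPPING entry "self_attn.k_proj"; the layer index is recovered
# from the prefix and substituted for "*", so the weight lands on
#   "unispeech_sat.encoder.layers.3.attention.k_proj" with weight_type "weight".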
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model, hf_model):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor

    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name,
                value,
                feature_extractor,
                unused_weights,
                hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split(".")[:-1]) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")
def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(
    checkpoint_path, pytorch_dump_folder_path, config_path=None, dict_path=None, is_finetuned=True
):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path)
    else:
        config = UniSpeechSatConfig()

    dict_path = ""

    if is_finetuned:
        hf_wav2vec = UniSpeechSatForCTC(config)
    else:
        hf_wav2vec = UniSpeechSatForPreTraining(config)

    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path], arg_overrides={"data": "/".join(dict_path.split("/")[:-1])}
    )
    model = model[0].eval()

    recursively_load_weights(model, hf_wav2vec)

    hf_wav2vec.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
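# Hedged usage sketch (not part of the original script): the converter can also
# be called directly instead of via the CLI; all paths below are placeholders,
# and the arguments mirror the positional call wired up by argparse above.
#
# convert_unispeech_sat_checkpoint(
#     "/path/to/fairseq_checkpoint.pt",   # checkpoint_path
#     "./unispeech-sat-hf",               # pytorch_dump_folder_path
#     None,                               # config_path
#     "/path/to/dict.ltr.txt",            # dict_path
#     True,                               # is_finetuned
# )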
| 25 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"""edbeeching/decision-transformer-gym-hopper-medium""": (
"""https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json"""
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "decision_transformer"
a_ = ["past_key_values"]
a_ = {
"max_position_embeddings": "n_positions",
"num_attention_heads": "n_head",
"num_hidden_layers": "n_layer",
}
def __init__( self : Optional[Any] , __A : Dict=1_7 , __A : Any=4 , __A : Tuple=1_2_8 , __A : Any=4_0_9_6 , __A : Tuple=True , __A : List[Any]=1 , __A : List[Any]=1_0_2_4 , __A : Optional[Any]=3 , __A : Any=1 , __A : Any=None , __A : List[str]="relu" , __A : Dict=0.1 , __A : Any=0.1 , __A : Union[str, Any]=0.1 , __A : Tuple=1e-5 , __A : Optional[Any]=0.0_2 , __A : Tuple=True , __A : Any=True , __A : Tuple=5_0_2_5_6 , __A : List[Any]=5_0_2_5_6 , __A : Any=False , __A : Optional[int]=False , **__A : Optional[Any] , ):
snake_case__ : List[str] = state_dim
snake_case__ : List[str] = act_dim
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : List[str] = max_ep_len
snake_case__ : List[str] = action_tanh
snake_case__ : List[str] = vocab_size
snake_case__ : List[Any] = n_positions
snake_case__ : Union[str, Any] = n_layer
snake_case__ : List[str] = n_head
snake_case__ : Tuple = n_inner
snake_case__ : str = activation_function
snake_case__ : Dict = resid_pdrop
snake_case__ : Any = embd_pdrop
snake_case__ : Any = attn_pdrop
snake_case__ : Union[str, Any] = layer_norm_epsilon
snake_case__ : List[str] = initializer_range
snake_case__ : List[Any] = scale_attn_weights
snake_case__ : Optional[int] = use_cache
snake_case__ : Union[str, Any] = scale_attn_by_inverse_layer_idx
snake_case__ : Dict = reorder_and_upcast_attn
snake_case__ : List[str] = bos_token_id
snake_case__ : Optional[int] = eos_token_id
super().__init__(bos_token_id=__A , eos_token_id=__A , **__A )
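# Hedged sketch (not part of the original file): upstream, the class above is
# `DecisionTransformerConfig`; with the defaults mirrored here it describes a
# small GPT-2-style backbone for a 17-dim state / 4-dim action space.
from transformers import DecisionTransformerConfig

_cfg = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128)
assert _cfg.n_head == 1 and _cfg.max_ep_len == 4_096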
| 25 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
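# Minimal sketch of the mask logic above (toy tensors; pad id assumed to be 1,
# as in the model tester below): the attention masks are just `ids != pad`.
import torch as _torch

_pad = 1
_ids = _torch.tensor([[2, 5, 6, _pad]])
assert _ids.ne(_pad).tolist() == [[True, True, True, False]]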
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
snake_case__, snake_case__ : Dict = outputs.to_tuple()
        # create hypothetical multiple next tokens and extend to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and attention_mask
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de dรฉbat sur le renseignement",
"Selon moi, il y a deux niveaux de rรฉponse de la part du gouvernement franรงais.",
"Lorsque Franรงois Hollande tรฉlรฉphone ร Barack Obama ou quand le ministre des affaires รฉtrangรจres Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils rรฉagissent ร une vraie dรฉcouverte, qui est celle de"
" l'ampleur de la surveillance amรฉricaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When Franรงois Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 25 | 1 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
def _lowercase ( self : int ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowercase ( self : Optional[int] ):
snake_case__ : Optional[Any] = {"col_1": [3, 2, 1, 0], "col_2": ["a", "b", "c", "d"]}
return Dataset.from_dict(__A )
def _lowercase ( self : int ):
snake_case__ : str = self._create_example_records()
snake_case__ : List[Any] = Dataset.from_list(__A )
self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
for i, r in enumerate(__A ):
self.assertDictEqual(__A , example_records[i] )
def _lowercase ( self : Optional[Any] ):
snake_case__ : int = self._create_example_records()
snake_case__ : Dict = Dataset.from_list(__A )
snake_case__ : List[str] = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
def _lowercase ( self : List[str] ): # checks what happens with missing columns
snake_case__ : Union[str, Any] = [{"col_1": 1}, {"col_2": "x"}]
snake_case__ : Union[str, Any] = Dataset.from_list(__A )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
def _lowercase ( self : Union[str, Any] ): # checks if the type can be inferred from the second record
snake_case__ : List[Any] = [{"col_1": []}, {"col_1": [1, 2]}]
snake_case__ : int = Dataset.from_list(__A )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
def _lowercase ( self : Any ):
snake_case__ : Tuple = Dataset.from_list([] )
self.assertEqual(len(__A ) , 0 )
self.assertListEqual(dset.column_names , [] )
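# Standalone restatement of the missing-column behaviour exercised above:
# `Dataset.from_list` infers the schema from the first record, so later
# records gain None for absent columns and lose columns record 0 lacked.
from datasets import Dataset as _Dataset

_dset = _Dataset.from_list([{"col_1": 1}, {"col_2": "x"}])
assert _dset[0] == {"col_1": 1} and _dset[1] == {"col_1": None}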
| 25 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : Optional[int] = []
for part_id in partition_order:
snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 )
snake_case__ : Any = Spark(snake_case_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
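# Worked arithmetic behind the assertion above (pure Python, no Spark needed):
# 100 int64 rows at 8 bytes each, sharded at max_shard_size=16 bytes, gives
# ceil(800 / 16) = 50 partitions.
_rows, _row_bytes, _max_shard = 100, 8, 16
assert -(-_rows * _row_bytes // _max_shard) == 50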
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 )
snake_case__ : Optional[Any] = [1, 0]
snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions.
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case__, snake_case__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[int] = spark.range(10 ).repartition(1 )
snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
        snake_case__ : Union[str, Any] = lambda snake_case_ : snake_case_.reverse()
snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(100 ).repartition(1 )
snake_case__ : Union[str, Any] = Spark(snake_case_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 25 | 1 |
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int ):
return 1 if input_a == input_a else 0
def SCREAMING_SNAKE_CASE ( ):
assert xnor_gate(0 , 0 ) == 1
assert xnor_gate(0 , 1 ) == 0
assert xnor_gate(1 , 0 ) == 0
assert xnor_gate(1 , 1 ) == 1
if __name__ == "__main__":
print(xnor_gate(0, 0))
print(xnor_gate(0, 1))
print(xnor_gate(1, 0))
print(xnor_gate(1, 1))
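# Equivalent bitwise formulation (sketch): for single-bit inputs,
# XNOR(a, b) == 1 - (a XOR b).
for _a in (0, 1):
    for _b in (0, 1):
        assert xnor_gate(_a, _b) == 1 - (_a ^ _b)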
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 1 |
from PIL import Image
def SCREAMING_SNAKE_CASE ( snake_case_ : Image ):
snake_case__, snake_case__ : Dict = image.size
snake_case__ : List[str] = 0
snake_case__ : List[Any] = image.load()
for i in range(snake_case_ ):
for j in range(snake_case_ ):
snake_case__ : int = pixels[j, i]
mean += pixel
mean //= width * height
for j in range(snake_case_ ):
for i in range(snake_case_ ):
snake_case__ : str = 255 if pixels[i, j] > mean else 0
return image
if __name__ == "__main__":
__lowerCamelCase : Dict = mean_threshold(Image.open("""path_to_image""").convert("""L"""))
image.save("""output_image_path""")
| 25 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 25 | 1 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def SCREAMING_SNAKE_CASE ( snake_case_ : dict ):
return (data["data"], data["target"])
def SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ):
snake_case__ : Optional[int] = XGBClassifier()
classifier.fit(snake_case_ , snake_case_ )
return classifier
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = load_iris()
snake_case__, snake_case__ : str = data_handling(snake_case_ )
snake_case__, snake_case__, snake_case__, snake_case__ : int = train_test_split(
snake_case_ , snake_case_ , test_size=0.25 )
snake_case__ : Dict = iris["target_names"]
# Create an XGBoost Classifier from the training data
snake_case__ : Dict = xgboost(snake_case_ , snake_case_ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
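# Hedged follow-up (not in the original script): the fitted classifier that
# `xgboost(...)` returns predicts with the usual sklearn-style API; the
# variable names below are placeholders for main()'s locals.
#
# clf = xgboost(x_train, y_train)
# preds = clf.predict(x_test)   # array of class indices, shape (n_test,)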
| 25 | 1 |
import argparse
from pathlib import Path
import torch
from transformers import OPTConfig, OPTModel
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : List[Any] = torch.load(snake_case_ , map_location="cpu" )
if "model" in sd.keys():
snake_case__ : List[Any] = torch.load(snake_case_ , map_location="cpu" )["model"]
# pop unnecessary weights
snake_case__ : List[str] = [
"decoder.version",
"decoder.output_projection.weight",
]
for key in keys_to_delete:
if key in sd:
sd.pop(snake_case_ )
snake_case__ : Union[str, Any] = {
"decoder.project_in_dim.weight": "decoder.project_in.weight",
"decoder.project_out_dim.weight": "decoder.project_out.weight",
"decoder.layer_norm.weight": "decoder.final_layer_norm.weight",
"decoder.layer_norm.bias": "decoder.final_layer_norm.bias",
}
for old_key, new_key in keys_to_rename.items():
if old_key in sd:
snake_case__ : Optional[int] = sd.pop(snake_case_ )
snake_case__ : Any = list(sd.keys() )
for key in keys:
if ".qkv_proj." in key:
snake_case__ : int = sd[key]
# We split QKV in separate Q,K,V
snake_case__ : List[Any] = key.replace(".qkv_proj." , ".q_proj." )
snake_case__ : Any = key.replace(".qkv_proj." , ".k_proj." )
snake_case__ : Optional[int] = key.replace(".qkv_proj." , ".v_proj." )
snake_case__ : int = value.shape[0]
assert depth % 3 == 0
            # `SequeuceParallelTransformerBlock` has its QKV weight separated as K, V, Q despite the naming:
# https://cs.github.com/facebookresearch/metaseq/blob/51871bd73cd04c038f239ea2a26db1d7f6b37927/metaseq/modules/sequence_parallel_transformer_layer.py#L97
snake_case__, snake_case__, snake_case__ : Dict = torch.split(snake_case_ , depth // 3 , dim=0 )
snake_case__ : Union[str, Any] = q
snake_case__ : Optional[Any] = k
snake_case__ : Union[str, Any] = v
del sd[key]
return sd
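# Self-contained sketch of the split performed above: a fused QKV projection
# with 3*d rows is cut into three equal d-row blocks along dim 0 (toy sizes
# here; the real matrices are (3 * hidden_size, hidden_size)).
import torch as _torch

_qkv = _torch.arange(18).reshape(6, 3)          # depth = 6, so d = 2
_q, _k, _v = _torch.split(_qkv, 6 // 3, dim=0)
assert _q.shape == _k.shape == _v.shape == (2, 3)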
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] , snake_case_ : Any=None ):
snake_case__ : int = load_checkpoint(snake_case_ )
if config is not None:
snake_case__ : Tuple = OPTConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = OPTConfig()
snake_case__ : Union[str, Any] = OPTModel(snake_case_ ).half().eval()
model.load_state_dict(snake_case_ )
# Check results
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
model.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fairseq_path""",
type=str,
help=(
"""path to fairseq checkpoint in correct format. You can find all checkpoints in the correct format here:"""
""" https://huggingface.co/models?other=opt_metasq"""
),
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--hf_config""", default=None, type=str, help="""Define HF config.""")
__lowerCamelCase : Optional[int] = parser.parse_args()
convert_opt_checkpoint(args.fairseq_path, args.pytorch_dump_folder_path, config=args.hf_config)
| 25 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ):
snake_case__ : Tuple = args.log_outputs
snake_case__ : Union[str, Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
snake_case__ : List[str] = load_metric("wer" )
snake_case__ : List[str] = load_metric("cer" )
# compute metrics
snake_case__ : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
snake_case__ : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
snake_case__ : Dict = F'''WER: {wer_result}\nCER: {cer_result}'''
print(snake_case_ )
with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f:
f.write(snake_case_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
snake_case__ : Union[str, Any] = F'''log_{dataset_id}_predictions.txt'''
snake_case__ : int = F'''log_{dataset_id}_targets.txt'''
with open(snake_case_ , "w" ) as p, open(snake_case_ , "w" ) as t:
# mapping function to write output
def write_to_file(snake_case_ : Union[str, Any] , snake_case_ : Any ):
p.write(F'''{i}''' + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(F'''{i}''' + "\n" )
t.write(batch["target"] + "\n" )
result.map(snake_case_ , with_indices=snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : List[Any] = "[,?.!\-\;\:\"โ%โโ๏ฟฝโโโฆโ]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
snake_case__ : Optional[int] = re.sub(snake_case_ , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
snake_case__ : Optional[Any] = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
snake_case__ : Optional[int] = " ".join(text.split(snake_case_ ) )
return text
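# Tiny demonstration of the normalization above (invented sentence):
# punctuation from the ignore set is stripped and the text is lower-cased
# before the whitespace clean-up, e.g.
# normalize_text("Hello, world.")  ->  "hello world"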
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# load dataset
snake_case__ : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
snake_case__ : List[str] = AutoFeatureExtractor.from_pretrained(args.model_id )
snake_case__ : List[Any] = feature_extractor.sampling_rate
# resample audio
snake_case__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case_ ) )
# load eval pipeline
if args.device is None:
snake_case__ : int = 0 if torch.cuda.is_available() else -1
snake_case__ : List[str] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case_ : Any ):
snake_case__ : Union[str, Any] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
snake_case__ : Optional[int] = prediction["text"]
snake_case__ : Optional[Any] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
snake_case__ : Any = dataset.map(snake_case_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case_ , snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with ๐ค Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with ๐ค Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__lowerCamelCase : str = parser.parse_args()
main(args)
| 25 | 1 |
from cva import destroyAllWindows, imread, imshow, waitKey
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
# getting number of pixels in the image
snake_case__, snake_case__ : Optional[int] = img.shape[0], img.shape[1]
# converting each pixel's color to its negative
for i in range(snake_case_ ):
for j in range(snake_case_ ):
snake_case__ : Optional[int] = [255, 255, 255] - img[i][j]
return img
if __name__ == "__main__":
# read original image
__lowerCamelCase : Optional[Any] = imread("""image_data/lena.jpg""", 1)
# convert to its negative
__lowerCamelCase : Optional[Any] = convert_to_negative(img)
# show result image
imshow("""negative of original image""", img)
waitKey(0)
destroyAllWindows()
| 25 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a_ = Features({"text": Value("string" )} )
a_ = Features({"labels": ClassLabel} )
a_ = "text"
a_ = "labels"
def _lowercase ( self : Tuple , __A : List[Any] ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
snake_case__ : Any = copy.deepcopy(self )
snake_case__ : Optional[Any] = self.label_schema.copy()
snake_case__ : List[str] = features[self.label_column]
snake_case__ : Dict = label_schema
return task_template
@property
def _lowercase ( self : Tuple ):
return {
self.text_column: "text",
self.label_column: "labels",
}
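# Hedged usage sketch: upstream this template is `datasets.tasks.TextClassification`;
# aligning it with a dataset's features copies that dataset's ClassLabel into
# the label schema. Names below mirror the public datasets API.
#
# feats = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
# task = TextClassification(text_column="text", label_column="labels").align_with_features(feats)
# assert task.label_schema["labels"].names == ["neg", "pos"]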
| 25 | 1 |
import itertools
import os
import random
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers import is_speech_available
from transformers.testing_utils import check_json_file_has_correct_format, require_torch, require_torchaudio
from transformers.utils.import_utils import is_torch_available
from ...test_sequence_feature_extraction_common import SequenceFeatureExtractionTestMixin
if is_speech_available():
from transformers import WhisperFeatureExtractor
if is_torch_available():
import torch
__lowerCamelCase : List[Any] = random.Random()
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any]=1.0 , snake_case_ : List[str]=None , snake_case_ : str=None ):
if rng is None:
snake_case__ : str = global_rng
snake_case__ : str = []
for batch_idx in range(shape[0] ):
values.append([] )
for _ in range(shape[1] ):
values[-1].append(rng.random() * scale )
return values
@require_torch
@require_torchaudio
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, min_seq_length=400, max_seq_length=2000, feature_size=10, hop_length=160, chunk_length=8, padding_value=0.0, sampling_rate=4000, return_attention_mask=False, do_normalize=True):
        self.parent = parent
        self.batch_size = batch_size
        self.min_seq_length = min_seq_length
        self.max_seq_length = max_seq_length
        self.seq_length_diff = (self.max_seq_length - self.min_seq_length) // (self.batch_size - 1)
        self.padding_value = padding_value
        self.sampling_rate = sampling_rate
        self.return_attention_mask = return_attention_mask
        self.do_normalize = do_normalize
        self.feature_size = feature_size
        self.chunk_length = chunk_length
        self.hop_length = hop_length
    def prepare_feat_extract_dict(self):
return {
"feature_size": self.feature_size,
"hop_length": self.hop_length,
"chunk_length": self.chunk_length,
"padding_value": self.padding_value,
"sampling_rate": self.sampling_rate,
"return_attention_mask": self.return_attention_mask,
"do_normalize": self.do_normalize,
}
    def prepare_inputs_for_common(self, equal_length=False, numpify=False):
        def _flatten(list_of_lists):
            return list(itertools.chain(*list_of_lists))

        if equal_length:
            speech_inputs = [floats_list((self.max_seq_length, self.feature_size)) for _ in range(self.batch_size)]
        else:
            # make sure that inputs increase in size
            speech_inputs = [
                floats_list((x, self.feature_size))
                for x in range(self.min_seq_length, self.max_seq_length, self.seq_length_diff)
            ]
        if numpify:
            speech_inputs = [np.asarray(x) for x in speech_inputs]
        return speech_inputs
@require_torch
@require_torchaudio
class WhisperFeatureExtractionTest(SequenceFeatureExtractionTestMixin, unittest.TestCase):
"""simple docstring"""
    feature_extraction_class = WhisperFeatureExtractor if is_speech_available() else None
    def setUp(self):
        self.feat_extract_tester = WhisperFeatureExtractionTester(self)
    def test_feat_extract_from_and_save_pretrained(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            saved_file = feat_extract_first.save_pretrained(tmpdirname)[0]
            check_json_file_has_correct_format(saved_file)
            feat_extract_second = self.feature_extraction_class.from_pretrained(tmpdirname)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_feat_extract_to_json_file(self):
        feat_extract_first = self.feature_extraction_class(**self.feat_extract_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, "feat_extract.json")
            feat_extract_first.to_json_file(json_file_path)
            feat_extract_second = self.feature_extraction_class.from_json_file(json_file_path)

        dict_first = feat_extract_first.to_dict()
        dict_second = feat_extract_second.to_dict()
        mel_1 = feat_extract_first.mel_filters
        mel_2 = feat_extract_second.mel_filters
        self.assertTrue(np.allclose(mel_1, mel_2))
        self.assertEqual(dict_first, dict_second)
    def test_call(self):
        # Tests that all call wrap to encode_plus and batch_encode_plus
        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        # create three inputs of length 800, 1000, and 1200
        speech_inputs = [floats_list((1, x))[0] for x in range(800, 1400, 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        # Test feature size
        input_features = feature_extractor(np_speech_inputs, padding="max_length", return_tensors="np").input_features
        self.assertTrue(input_features.ndim == 3)
        self.assertTrue(input_features.shape[-1] == feature_extractor.nb_max_frames)
        self.assertTrue(input_features.shape[-2] == feature_extractor.feature_size)

        # Test not batched input
        encoded_sequences_1 = feature_extractor(speech_inputs[0], return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs[0], return_tensors="np").input_features
        self.assertTrue(np.allclose(encoded_sequences_1, encoded_sequences_2, atol=1e-3))

        # Test batched
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test 2-D numpy arrays are batched.
        speech_inputs = [floats_list((1, x))[0] for x in (800, 800, 800)]
        np_speech_inputs = np.asarray(speech_inputs)
        encoded_sequences_1 = feature_extractor(speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))

        # Test truncation required
        speech_inputs = [floats_list((1, x))[0] for x in range(200, (feature_extractor.n_samples + 500), 200)]
        np_speech_inputs = [np.asarray(speech_input) for speech_input in speech_inputs]

        speech_inputs_truncated = [x[: feature_extractor.n_samples] for x in speech_inputs]
        np_speech_inputs_truncated = [np.asarray(speech_input) for speech_input in speech_inputs_truncated]

        encoded_sequences_1 = feature_extractor(np_speech_inputs, return_tensors="np").input_features
        encoded_sequences_2 = feature_extractor(np_speech_inputs_truncated, return_tensors="np").input_features
        for enc_seq_1, enc_seq_2 in zip(encoded_sequences_1, encoded_sequences_2):
            self.assertTrue(np.allclose(enc_seq_1, enc_seq_2, atol=1e-3))
    def test_double_precision_pad(self):
        import torch

        feature_extractor = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        np_speech_inputs = np.random.rand(100, 32).astype(np.float64)
        py_speech_inputs = np_speech_inputs.tolist()

        for inputs in [py_speech_inputs, np_speech_inputs]:
            np_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="np")
            self.assertTrue(np_processed.input_features.dtype == np.float32)
            pt_processed = feature_extractor.pad([{"input_features": inputs}], return_tensors="pt")
            self.assertTrue(pt_processed.input_features.dtype == torch.float32)
    def _load_datasamples(self, num_samples):
        ds = load_dataset("hf-internal-testing/librispeech_asr_dummy", "clean", split="validation")
        # automatic decoding with librispeech
        speech_samples = ds.sort("id").select(range(num_samples))[:num_samples]["audio"]

        return [x["array"] for x in speech_samples]
    def test_integration(self):
        # fmt: off
        EXPECTED_INPUT_FEATURES = torch.tensor(
            [
                0.1193, -0.0946, -0.1098, -0.0196, 0.0225, -0.0690, -0.1736, 0.0951,
                0.0971, -0.0817, -0.0702, 0.0162, 0.0260, 0.0017, -0.0192, -0.1678,
                0.0709, -0.1867, -0.0655, -0.0274, -0.0234, -0.1884, -0.0516, -0.0554,
                -0.0274, -0.1425, -0.1423, 0.0837, 0.0377, -0.0854
            ]
        )
        # fmt: on
        input_speech = self._load_datasamples(1)
        feature_extractor = WhisperFeatureExtractor()
        input_features = feature_extractor(input_speech, return_tensors="pt").input_features
        self.assertEqual(input_features.shape, (1, 80, 3000))
        self.assertTrue(torch.allclose(input_features[0, 0, :30], EXPECTED_INPUT_FEATURES, atol=1e-4))
    def test_zero_mean_unit_variance_normalization(self):
        feat_extract = self.feature_extraction_class(**self.feat_extract_tester.prepare_feat_extract_dict())
        audio = self._load_datasamples(1)[0]
        audio = ((audio - audio.min()) / (audio.max() - audio.min())) * 65535  # Rescale to [0, 65535] to show issue
        audio = feat_extract.zero_mean_unit_var_norm([audio], attention_mask=None)[0]

        self.assertTrue(np.all(np.mean(audio) < 1e-3))
        self.assertTrue(np.all(np.abs(np.var(audio) - 1) < 1e-3))
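
# Standalone sketch (added; not from the original test file): the zero-mean /
# unit-variance normalization exercised above is simply (x - mean) / sqrt(var + eps),
# which is why the assertions on np.mean(...) and np.var(...) hold.
def _zero_mean_unit_var_sketch(x, eps=1e-7):
    x = np.asarray(x, dtype=np.float64)
    return (x - x.mean()) / np.sqrt(x.var() + eps)


_demo = _zero_mean_unit_var_sketch([0.0, 1.0, 2.0, 3.0])
assert abs(_demo.mean()) < 1e-3 and abs(_demo.var() - 1) < 1e-3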
| 25 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)

INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "instructblip_vision_model"

    def __init__(self, hidden_size=1408, intermediate_size=6144, num_hidden_layers=39, num_attention_heads=16, image_size=224, patch_size=14, hidden_act="gelu", layer_norm_eps=1e-6, attention_dropout=0.0, initializer_range=1e-10, qkv_bias=True, **kwargs):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "instructblip_qformer"

    def __init__(self, vocab_size=30522, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, initializer_range=0.02, layer_norm_eps=1e-12, pad_token_id=0, position_embedding_type="absolute", cross_attention_frequency=2, encoder_hidden_size=1408, **kwargs):
        super().__init__(pad_token_id=pad_token_id, **kwargs)

        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
@classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
    """simple docstring"""

    model_type = "instructblip"
    is_composition = True

    def __init__(self, vision_config=None, qformer_config=None, text_config=None, num_query_tokens=32, **kwargs):
        super().__init__(**kwargs)

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values.")

        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.")

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`).")

        self.vision_config = InstructBlipVisionConfig(**vision_config)
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config)
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config)

        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder

        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
@classmethod
    def from_vision_qformer_text_configs(cls, vision_config: InstructBlipVisionConfig, qformer_config: InstructBlipQFormerConfig, text_config: PretrainedConfig, **kwargs):
        return cls(
            vision_config=vision_config.to_dict(), qformer_config=qformer_config.to_dict(), text_config=text_config.to_dict(), **kwargs,
        )
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
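
# Hedged usage sketch (added for illustration; the tiny sizes are arbitrary and the
# snippet assumes the classes above are importable as a module):
if __name__ == "__main__":
    vision = InstructBlipVisionConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
    qformer = InstructBlipQFormerConfig(hidden_size=32, num_hidden_layers=2, num_attention_heads=4)
    text = CONFIG_MAPPING["opt"]()
    combined = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
    print(sorted(combined.to_dict().keys()))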
| 25 | 1 |
import mpmath # for roots of unity
import numpy as np
class FFT:
    """simple docstring"""

    def __init__(self, poly_a=None, poly_b=None):
        # Input as list
        self.polyA = list(poly_a or [0])[:]
        self.polyB = list(poly_b or [0])[:]

        # Remove leading zero coefficients
        while self.polyA[-1] == 0:
            self.polyA.pop()
        self.len_A = len(self.polyA)

        while self.polyB[-1] == 0:
            self.polyB.pop()
        self.len_B = len(self.polyB)

        # Add 0 to make lengths equal a power of 2
        self.c_max_length = int(
            2 ** np.ceil(np.log2(len(self.polyA) + len(self.polyB) - 1))
        )

        while len(self.polyA) < self.c_max_length:
            self.polyA.append(0)
        while len(self.polyB) < self.c_max_length:
            self.polyB.append(0)

        # A complex root used for the fourier transform
        self.root = complex(mpmath.root(x=1, n=self.c_max_length, k=1))

        # The product
        self.product = self.__multiply()

    # Discrete fourier transform of A and B
    def __dft(self, which):
        dft = [[x] for x in self.polyA] if which == "A" else [[x] for x in self.polyB]
        # Corner case
        if len(dft) <= 1:
            return dft[0]

        next_ncol = self.c_max_length // 2
        while next_ncol > 0:
            new_dft = [[] for i in range(next_ncol)]
            root = self.root**next_ncol

            # First half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] + current_root * dft[i + next_ncol][j])
                current_root *= root
            # Second half of next step
            current_root = 1
            for j in range(self.c_max_length // (next_ncol * 2)):
                for i in range(next_ncol):
                    new_dft[i].append(dft[i][j] - current_root * dft[i + next_ncol][j])
                current_root *= root

            # Update
            dft = new_dft
            next_ncol = next_ncol // 2
        return dft[0]

    # multiply the DFTs of A and B and find A*B
    def __multiply(self):
        dft_a = self.__dft("A")
        dft_b = self.__dft("B")
        inverce_c = [[dft_a[i] * dft_b[i] for i in range(self.c_max_length)]]
        del dft_a
        del dft_b

        # Corner Case
        if len(inverce_c[0]) <= 1:
            return inverce_c[0]
        # Inverse DFT
        next_ncol = 2
        while next_ncol <= self.c_max_length:
            new_inverse_c = [[] for i in range(next_ncol)]
            root = self.root ** (next_ncol // 2)
            current_root = 1
            # First half of next step
            for j in range(self.c_max_length // next_ncol):
                for i in range(next_ncol // 2):
                    # Even positions
                    new_inverse_c[i].append(
                        (
                            inverce_c[i][j]
                            + inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / 2
                    )
                    # Odd positions
                    new_inverse_c[i + next_ncol // 2].append(
                        (
                            inverce_c[i][j]
                            - inverce_c[i][j + self.c_max_length // next_ncol]
                        )
                        / (2 * current_root)
                    )
                current_root *= root
            # Update
            inverce_c = new_inverse_c
            next_ncol *= 2

        # Unpack
        inverce_c = [round(x[0].real, 8) + round(x[0].imag, 8) * 1j for x in inverce_c]

        # Remove leading 0's
        while inverce_c[-1] == 0:
            inverce_c.pop()
        return inverce_c

    # Overwrite __str__ for print(); shows A, B and A*B
    def __str__(self):
        a = "A = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyA[: self.len_A])
        )
        b = "B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.polyB[: self.len_B])
        )
        c = "A*B = " + " + ".join(
            f"{coef}*x^{i}" for i, coef in enumerate(self.product)
        )

        return f"{a}\n{b}\n{c}"
# Unit tests
if __name__ == "__main__":
import doctest
doctest.testmod()
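
    # Illustrative check (added; not in the original file): multiplying
    # (1 + 2x + 3x^2) by (2 + x); the printed product should match
    # 2 + 5x + 8x^2 + 3x^3 up to floating-point rounding.
    print(FFT([1, 2, 3], [2, 1]))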
| 25 |
def gnome_sort(lst: list) -> list:
    if len(lst) <= 1:
        return lst

    i = 1

    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1

    return lst


if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
    print(gnome_sort(unsorted))
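
    # Quick sanity checks (illustrative addition, not in the original file).
    assert gnome_sort([3, 1, 2]) == [1, 2, 3]
    assert gnome_sort([]) == []
    assert gnome_sort([-5, 1, 0]) == [-5, 0, 1]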
| 25 | 1 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
"""simple docstring"""
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self._recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
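
# Standalone sketch (added for illustration) of the dtype-selection rule used in
# _tensorize above: integers follow jax's x64 flag, floats default to float32.
# Assumes jax is installed; this helper is not part of the original formatter.
def _default_jnp_dtype_sketch(value):
    import jax
    import jax.numpy as jnp

    arr = np.asarray(value)
    if np.issubdtype(arr.dtype, np.integer):
        return jnp.int64 if jax.config.jax_enable_x64 else jnp.int32
    if np.issubdtype(arr.dtype, np.floating):
        return jnp.float32
    return None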
| 25 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]

grid = [
    [0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 1, 0, 0, 0, 0],
    [1, 0, 1, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 1, 0, 0],
]

delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """simple docstring"""

    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """simple docstring"""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)

        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)

            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)

            successors = self.get_successors(current_node)

            for node in successors:
                self.node_queue.append(node)

        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue

            if grid[pos_y][pos_x] != 0:
                continue

            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """simple docstring"""

    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)

            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )

            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node

            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }

            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)

        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)

    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    bfs_path = bfs.search()
    bfs_time = time.time() - start_bfs_time

    print("Unidirectional BFS computation time : ", bfs_time)

    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time

    print("Bidirectional BFS computation time : ", bd_bfs_time)
| 25 | 1 |
import argparse
import os
import torch
from transformers import FlavaConfig, FlavaForPreTraining
from transformers.models.flava.convert_dalle_to_flava_codebook import convert_dalle_checkpoint
def count_parameters(state_dict):
    # encoder.embeddings are double copied in original FLAVA
    return sum(param.float().sum() if "encoder.embeddings" not in key else 0 for key, param in state_dict.items())
def upgrade_state_dict(state_dict, codebook_state_dict):
    upgrade = {}

    for key, value in state_dict.items():
        if "text_encoder.embeddings" in key or "image_encoder.embeddings" in key:
            continue

        key = key.replace("heads.cmd.mim_head.cls.predictions", "mmm_image_head")
        key = key.replace("heads.cmd.mlm_head.cls.predictions", "mmm_text_head")
        key = key.replace("heads.cmd.itm_head.cls", "itm_head")
        key = key.replace("heads.cmd.itm_head.pooler", "itm_head.pooler")
        key = key.replace("heads.cmd.clip_head.logit_scale", "flava.logit_scale")
        key = key.replace("heads.fairseq_mlm.cls.predictions", "mlm_head")
        key = key.replace("heads.imagenet.mim_head.cls.predictions", "mim_head")
        key = key.replace("mm_text_projection", "flava.text_to_mm_projection")
        key = key.replace("mm_image_projection", "flava.image_to_mm_projection")
        key = key.replace("image_encoder.module", "flava.image_model")
        key = key.replace("text_encoder.module", "flava.text_model")
        key = key.replace("mm_encoder.module.encoder.cls_token", "flava.multimodal_model.cls_token")
        key = key.replace("mm_encoder.module", "flava.multimodal_model")
        key = key.replace("text_projection", "flava.text_projection")
        key = key.replace("image_projection", "flava.image_projection")

        upgrade[key] = value.float()

    for key, value in codebook_state_dict.items():
        upgrade[f"image_codebook.{key}"] = value

    return upgrade
@torch.no_grad()
def convert_flava_checkpoint(checkpoint_path, codebook_path, pytorch_dump_folder_path, config_path=None):
    if config_path is not None:
        config = FlavaConfig.from_pretrained(config_path)
    else:
        config = FlavaConfig()

    hf_model = FlavaForPreTraining(config).eval()

    codebook_state_dict = convert_dalle_checkpoint(codebook_path, None, save_checkpoint=False)

    if os.path.exists(checkpoint_path):
        state_dict = torch.load(checkpoint_path, map_location="cpu")
    else:
        state_dict = torch.hub.load_state_dict_from_url(checkpoint_path, map_location="cpu")

    hf_state_dict = upgrade_state_dict(state_dict, codebook_state_dict)
    hf_model.load_state_dict(hf_state_dict)
    hf_state_dict = hf_model.state_dict()
    hf_count = count_parameters(hf_state_dict)
    state_dict_count = count_parameters(state_dict) + count_parameters(codebook_state_dict)

    assert torch.allclose(hf_count, state_dict_count, atol=1e-3)

    hf_model.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to flava checkpoint""")
parser.add_argument("""--codebook_path""", default=None, type=str, help="""Path to flava codebook checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
    args = parser.parse_args()
convert_flava_checkpoint(args.checkpoint_path, args.codebook_path, args.pytorch_dump_folder_path, args.config_path)
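
# Hedged usage note (illustrative, not in the original script): a typical invocation,
# with placeholder file names, would look like
#
#   python convert_flava_original_pytorch_to_hf.py \
#       --checkpoint_path flava_full.ckpt \
#       --codebook_path flava_codebook.ckpt \
#       --pytorch_dump_folder_path ./flava-hf
#
# The script and checkpoint names here are assumptions made for the example.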
| 25 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
"""simple docstring"""
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict(self):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]

        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    """simple docstring"""

    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)
    def test_batch_feature(self):
pass
    def test_call_pil(self):
# Initialize image_processing
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_numpy(self):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
    def test_call_pytorch(self):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations(self):
# prepare image and target
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
# prepare image, target and masks_path
snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 1 |
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        "0": "Sunday",
        "1": "Monday",
        "2": "Tuesday",
        "3": "Wednesday",
        "4": "Thursday",
        "5": "Friday",
        "6": "Saturday",
    }

    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}

    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("Must be 10 characters long")

    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("Month must be between 1 - 12")

    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("Date must be between 1 - 31")

    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("Date separator must be '-' or '/'")

    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            "Year out of range. There has to be some sort of limit...right?"
        )

    # Get datetime obj for validation
    dt_ck = datetime.date(int(y), int(m), int(d))

    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math

    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("The date was evaluated incorrectly. Contact developer.")

    # Response
    response = f"Your date {date_input}, is a {days[str(f)]}!"
    return response
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
"""Find out what day of the week nearly any date is or was. Enter """
"""date as a string in the mm-dd-yyyy or mm/dd/yyyy format"""
)
)
parser.add_argument(
"""date_input""", type=str, help="""Date as a string (mm-dd-yyyy or mm/dd/yyyy)"""
)
    args = parser.parse_args()
    print(zeller(args.date_input))
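
    # Illustrative cross-check (added): the weekday can also be read directly from
    # datetime and should agree with zeller() for any valid mm-dd-yyyy input.
    month, day, year = (int(part) for part in args.date_input.replace("/", "-").split("-"))
    print("datetime says:", datetime.date(year, month, day).strftime("%A"))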
| 25 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCamelCase : str = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback-Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
"""simple docstring"""
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
    def _compute(self, predictions, references, p_features=None, q_features=None, p_tokens=None, q_tokens=None, num_buckets="auto", pca_max_data=-1, kmeans_explained_var=0.9, kmeans_num_redo=5, kmeans_max_iter=500, featurize_model_name="gpt2-large", device_id=-1, max_text_length=1024, divergence_curve_discretization_size=25, mauve_scaling_factor=5, verbose=True, seed=25):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
| 25 | 1 |
import logging
import math
import os
from dataclasses import dataclass, field
from glob import glob
from typing import Optional
from torch.utils.data import ConcatDataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_WITH_LM_HEAD_MAPPING,
AutoConfig,
AutoModelWithLMHead,
AutoTokenizer,
DataCollatorForLanguageModeling,
DataCollatorForPermutationLanguageModeling,
DataCollatorForWholeWordMask,
HfArgumentParser,
LineByLineTextDataset,
LineByLineWithRefDataset,
PreTrainedTokenizer,
TextDataset,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import is_main_process
logger = logging.getLogger(__name__)


MODEL_CONFIG_CLASSES = list(MODEL_WITH_LM_HEAD_MAPPING.keys())
MODEL_TYPES = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class ModelArguments:
    """simple docstring"""

    model_name_or_path: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The model checkpoint for weights initialization. Leave None if you want to train a model from"
                " scratch."
            )
        },
    )
    model_type: Optional[str] = field(
        default=None, metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(MODEL_TYPES)},
    )
    config_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained config name or path if not the same as model_name"}
    )
    tokenizer_name: Optional[str] = field(
        default=None, metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"}
    )
    cache_dir: Optional[str] = field(
        default=None, metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"},
    )
@dataclass
class DataTrainingArguments:
    """simple docstring"""

    train_data_file: Optional[str] = field(
        default=None, metadata={"help": "The input training data file (a text file)."}
    )
    train_data_files: Optional[str] = field(
        default=None,
        metadata={
            "help": (
                "The input training data files (multiple files in glob format). "
                "Very often splitting large files to smaller files can prevent tokenizer going out of memory"
            )
        },
    )
    eval_data_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."},
    )
    train_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input train ref data file for whole word mask in Chinese."},
    )
    eval_ref_file: Optional[str] = field(
        default=None, metadata={"help": "An optional input eval ref data file for whole word mask in Chinese."},
    )
    line_by_line: bool = field(
        default=False, metadata={"help": "Whether distinct lines of text in the dataset are to be handled as distinct sequences."},
    )
    mlm: bool = field(
        default=False, metadata={"help": "Train with masked-language modeling loss instead of language modeling."}
    )
    whole_word_mask: bool = field(default=False, metadata={"help": "Whether ot not to use whole word mask."})
    mlm_probability: float = field(
        default=0.15, metadata={"help": "Ratio of tokens to mask for masked language modeling loss"}
    )
    plm_probability: float = field(
        default=1 / 6,
        metadata={
            "help": (
                "Ratio of length of a span of masked tokens to surrounding context length for permutation language"
                " modeling."
            )
        },
    )
    max_span_length: int = field(
        default=5, metadata={"help": "Maximum length of a span of masked tokens for permutation language modeling."}
    )
    block_size: int = field(
        default=-1,
        metadata={
            "help": (
                "Optional input sequence length after tokenization."
                "The training dataset will be truncated in block of this size for training."
                "Default to the model max input length for single sentence inputs (take into account special tokens)."
            )
        },
    )
    overwrite_cache: bool = field(
        default=False, metadata={"help": "Overwrite the cached training and evaluation sets"}
    )
def get_dataset(
    args: DataTrainingArguments,
    tokenizer: PreTrainedTokenizer,
    evaluate: bool = False,
    cache_dir: Optional[str] = None,
):
    def _dataset(file_path, ref_path=None):
        if args.line_by_line:
            if ref_path is not None:
                if not args.whole_word_mask or not args.mlm:
                    raise ValueError("You need to set world whole masking and mlm to True for Chinese Whole Word Mask")
                return LineByLineWithRefDataset(
                    tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, ref_path=ref_path,
                )
            return LineByLineTextDataset(tokenizer=tokenizer, file_path=file_path, block_size=args.block_size)
        else:
            return TextDataset(
                tokenizer=tokenizer, file_path=file_path, block_size=args.block_size, overwrite_cache=args.overwrite_cache, cache_dir=cache_dir,
            )

    if evaluate:
        return _dataset(args.eval_data_file, args.eval_ref_file)
    elif args.train_data_files:
        return ConcatDataset([_dataset(f) for f in glob(args.train_data_files)])
    else:
        return _dataset(args.train_data_file, args.train_ref_file)
def main():
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
    parser = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments))
    model_args, data_args, training_args = parser.parse_args_into_dataclasses()
if data_args.eval_data_file is None and training_args.do_eval:
raise ValueError(
"Cannot do evaluation without an evaluation data file. Either supply a file to --eval_data_file "
"or remove the --do_eval argument." )
if (
os.path.exists(training_args.output_dir )
and os.listdir(training_args.output_dir )
and training_args.do_train
and not training_args.overwrite_output_dir
):
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. Use'''
" --overwrite_output_dir to overcome." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.local_rank != -1 ) , training_args.fpaa , )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , snake_case_ )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
if model_args.config_name:
snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(model_args.config_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
snake_case__ : List[str] = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.tokenizer_name:
snake_case__ : Union[str, Any] = AutoTokenizer.from_pretrained(model_args.tokenizer_name , cache_dir=model_args.cache_dir )
elif model_args.model_name_or_path:
snake_case__ : Optional[Any] = AutoTokenizer.from_pretrained(model_args.model_name_or_path , cache_dir=model_args.cache_dir )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported, but you can do it from another"
" script, save it,and load it from here, using --tokenizer_name" )
if model_args.model_name_or_path:
snake_case__ : Optional[Any] = AutoModelWithLMHead.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , )
else:
logger.info("Training new model from scratch" )
snake_case__ : Optional[int] = AutoModelWithLMHead.from_config(snake_case_ )
model.resize_token_embeddings(len(snake_case_ ) )
if config.model_type in ["bert", "roberta", "distilbert", "camembert"] and not data_args.mlm:
raise ValueError(
"BERT and RoBERTa-like models do not have LM heads but masked LM heads. They must be run using the"
"--mlm flag (masked language modeling)." )
if data_args.block_size <= 0:
snake_case__ : Dict = tokenizer.max_len
# Our input block size will be the max possible for the model
else:
snake_case__ : Any = min(data_args.block_size , tokenizer.max_len )
# Get datasets
snake_case__ : Optional[int] = (
get_dataset(snake_case_ , tokenizer=snake_case_ , cache_dir=model_args.cache_dir ) if training_args.do_train else None
)
snake_case__ : str = (
get_dataset(snake_case_ , tokenizer=snake_case_ , evaluate=snake_case_ , cache_dir=model_args.cache_dir )
if training_args.do_eval
else None
)
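    # Choose a collator that matches the training objective: XLNet needs the
    # permutation-LM collator, masked LM can optionally mask whole words, and the
    # generic collator covers plain masked/causal LM.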
if config.model_type == "xlnet":
snake_case__ : int = DataCollatorForPermutationLanguageModeling(
tokenizer=snake_case_ , plm_probability=data_args.plm_probability , max_span_length=data_args.max_span_length , )
else:
if data_args.mlm and data_args.whole_word_mask:
snake_case__ : int = DataCollatorForWholeWordMask(
tokenizer=snake_case_ , mlm_probability=data_args.mlm_probability )
else:
snake_case__ : List[str] = DataCollatorForLanguageModeling(
tokenizer=snake_case_ , mlm=data_args.mlm , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
snake_case__ : Optional[Any] = Trainer(
model=snake_case_ , args=snake_case_ , data_collator=snake_case_ , train_dataset=snake_case_ , eval_dataset=snake_case_ , prediction_loss_only=snake_case_ , )
# Training
if training_args.do_train:
snake_case__ : Any = (
model_args.model_name_or_path
if model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path )
else None
)
trainer.train(model_path=snake_case_ )
trainer.save_model()
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
if trainer.is_world_master():
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
snake_case__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case__ : Union[str, Any] = trainer.evaluate()
snake_case__ : str = math.exp(eval_output["eval_loss"] )
snake_case__ : List[str] = {"perplexity": perplexity}
snake_case__ : int = os.path.join(training_args.output_dir , "eval_results_lm.txt" )
if trainer.is_world_master():
with open(snake_case_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key in sorted(result.keys() ):
logger.info(" %s = %s" , snake_case_ , str(result[key] ) )
writer.write("%s = %s\n" % (key, str(result[key] )) )
        results.update(result )
return results
def SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 25 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCamelCase : List[Any] = concatenate_datasets
__lowerCamelCase : List[str] = DownloadConfig
__lowerCamelCase : Union[str, Any] = DownloadManager
__lowerCamelCase : str = DownloadMode
__lowerCamelCase : Union[str, Any] = DownloadConfig
__lowerCamelCase : List[str] = DownloadMode
__lowerCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 25 | 1 |
from __future__ import annotations
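# Double-base palindromes (cf. Project Euler 36): sum the numbers below the limit
# that are palindromic in base 10 and in base 2 (bin() keeps a "0b" prefix, hence
# the split on "b").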
def is_palindrome(n: int | str) -> bool:
    n = str(n)
    return n == n[::-1]


def solution(limit: int = 1000000) -> int:
    total = 0
    for i in range(1, limit):
        if is_palindrome(i) and is_palindrome(bin(i).split("b")[1]):
            total += i
    return total
if __name__ == "__main__":
print(solution(int(str(input().strip()))))
| 700 |
from __future__ import annotations
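# Consecutive prime sum (cf. Project Euler 50): find the prime below the ceiling
# that is expressible as the sum of the longest run of consecutive primes.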
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    # strike out multiples of each odd i; even numbers are skipped entirely
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1000000) -> int:
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 25 | 0 |
import pytest
from datasets import inspect_metric, list_metrics, load_metric
@pytest.fixture
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
monkeypatch.setattr("datasets.utils.deprecation_utils._emitted_deprecation_warnings" , set() )
@pytest.fixture
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Optional[int] , __A : int ):
snake_case__ : Any = metric_id
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = [MetricMock(_snake_case ) for metric_id in ["accuracy", "mse", "precision", "codeparrot/apps_metric"]]
def _lowercase ( self : List[str] ):
return self._metrics
monkeypatch.setattr("datasets.inspect.huggingface_hub" , HfhMock() )
@pytest.mark.parametrize(
"func, args" , [(load_metric, ("metrics/mse",)), (list_metrics, ()), (inspect_metric, ("metrics/mse", "tmp_path"))] )
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Any , snake_case_ : Any , snake_case_ : Dict ):
if "tmp_path" in args:
snake_case__ : List[str] = tuple(arg if arg != "tmp_path" else tmp_path for arg in args )
with pytest.warns(__A , match="https://huggingface.co/docs/evaluate" ):
func(*__A )
| 701 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
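        # Mirrors the processor's shortest-edge resize rule to derive the expected
        # output height/width; for batches, the per-dimension maximum wins.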
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : int = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 0 |
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
@staticmethod
def _lowercase ( *__A : Tuple , **__A : Optional[int] ):
pass
@is_pipeline_test
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a_ = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
def _lowercase ( self : Optional[int] , __A : Tuple , __A : int , __A : Optional[Any] ):
snake_case__ : Any = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
snake_case__ : Optional[Any] = [
{
"image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" ),
"question": "How many cats are there?",
},
{
"image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
"question": "How many cats are there?",
},
]
return vqa_pipeline, examples
def _lowercase ( self : int , __A : List[Any] , __A : int ):
snake_case__ : Union[str, Any] = vqa_pipeline(UpperCAmelCase_ , top_k=1 )
self.assertEqual(
UpperCAmelCase_ , [
[{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}],
[{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}],
] , )
@require_torch
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Any = pipeline("visual-question-answering" , model="hf-internal-testing/tiny-vilt-random-vqa" )
snake_case__ : List[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
snake_case__ : Union[str, Any] = "How many cats are there?"
snake_case__ : str = vqa_pipeline(image=UpperCAmelCase_ , question="How many cats are there?" , top_k=2 )
self.assertEqual(
UpperCAmelCase_ , [{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}, {"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}] )
snake_case__ : List[str] = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
UpperCAmelCase_ , [{"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}, {"score": ANY(UpperCAmelCase_ ), "answer": ANY(UpperCAmelCase_ )}] )
@slow
@require_torch
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = pipeline("visual-question-answering" , model="dandelin/vilt-b32-finetuned-vqa" )
snake_case__ : Optional[Any] = "./tests/fixtures/tests_samples/COCO/000000039769.png"
snake_case__ : int = "How many cats are there?"
snake_case__ : List[Any] = vqa_pipeline(image=UpperCAmelCase_ , question=UpperCAmelCase_ , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] )
snake_case__ : Any = vqa_pipeline({"image": image, "question": question} , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}] )
snake_case__ : str = vqa_pipeline(
[{"image": image, "question": question}, {"image": image, "question": question}] , top_k=2 )
self.assertEqual(
nested_simplify(UpperCAmelCase_ , decimals=4 ) , [[{"score": 0.8_7_9_9, "answer": "2"}, {"score": 0.2_9_6, "answer": "1"}]] * 2 , )
@require_tf
@unittest.skip("Visual question answering not implemented in TF" )
def _lowercase ( self : List[str] ):
pass
| 702 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
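# Maps every byte 0-255 to a printable unicode character: bytes already printable
# map to themselves, the rest are shifted into the 256+ range so a token string
# never contains whitespace or control characters.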
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ):
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Any = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Union[str, Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Optional[Any] , __A : Optional[int] ):
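        # Greedy BPE: repeatedly merge the best-ranked adjacent symbol pair (lowest
        # merge index) until no pair in the word appears in the merge table.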
if token in self.cache:
return self.cache[token]
snake_case__ : Union[str, Any] = tuple(__A )
snake_case__ : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__, snake_case__ : Dict = bigram
snake_case__ : str = []
snake_case__ : Union[str, Any] = 0
while i < len(__A ):
try:
snake_case__ : Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : str = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__A )
snake_case__ : List[Any] = " ".join(__A )
snake_case__ : Optional[int] = word
return word
def _lowercase ( self : Optional[Any] , __A : Optional[Any] ):
snake_case__ : List[str] = []
for token in re.findall(self.pat , __A ):
snake_case__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : Optional[Any] ):
return self.decoder.get(__A )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : Optional[Any] = "".join(__A )
snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : str = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : str = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ):
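        # Byte-level BPE is whitespace-sensitive: prepending a space makes the first
        # word tokenize the same way it would mid-sentence.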
snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : Optional[int] = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
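        # `global_attention_mask` is LED-specific and unknown to the base `_pad`,
        # so it is padded here to the same length as the other inputs.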
snake_case__ : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 25 | 0 |
import warnings
warnings.warn(
"""memory_utils has been reorganized to utils.memory. Import `find_executable_batchsize` from the main `__init__`: """
"""`from accelerate import find_executable_batch_size` to avoid this warning.""",
FutureWarning,
)
| 703 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
| 25 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
'''microsoft/beit-base-patch16-224-pt22k''': (
'''https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json'''
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "beit"
def __init__( self : Optional[Any] , __A : int=8_1_9_2 , __A : str=7_6_8 , __A : Tuple=1_2 , __A : Optional[Any]=1_2 , __A : Dict=3_0_7_2 , __A : Any="gelu" , __A : List[Any]=0.0 , __A : Dict=0.0 , __A : Tuple=0.0_2 , __A : Optional[Any]=1e-1_2 , __A : List[Any]=2_2_4 , __A : List[str]=1_6 , __A : Optional[int]=3 , __A : List[str]=False , __A : str=False , __A : str=False , __A : Dict=False , __A : List[Any]=0.1 , __A : str=0.1 , __A : Optional[Any]=True , __A : int=[3, 5, 7, 1_1] , __A : Optional[int]=[1, 2, 3, 6] , __A : Union[str, Any]=True , __A : Tuple=0.4 , __A : Optional[Any]=2_5_6 , __A : Tuple=1 , __A : Tuple=False , __A : Tuple=2_5_5 , **__A : Union[str, Any] , ):
super().__init__(**UpperCamelCase_ )
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : Dict = hidden_size
snake_case__ : Dict = num_hidden_layers
snake_case__ : Any = num_attention_heads
snake_case__ : List[Any] = intermediate_size
snake_case__ : Dict = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : Optional[Any] = layer_norm_eps
snake_case__ : Optional[int] = image_size
snake_case__ : Optional[int] = patch_size
snake_case__ : List[Any] = num_channels
snake_case__ : Union[str, Any] = use_mask_token
snake_case__ : int = use_absolute_position_embeddings
snake_case__ : Union[str, Any] = use_relative_position_bias
snake_case__ : List[Any] = use_shared_relative_position_bias
snake_case__ : int = layer_scale_init_value
snake_case__ : Union[str, Any] = drop_path_rate
snake_case__ : List[Any] = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ : Dict = out_indices
snake_case__ : Dict = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ : List[str] = use_auxiliary_head
snake_case__ : str = auxiliary_loss_weight
snake_case__ : List[Any] = auxiliary_channels
snake_case__ : Union[str, Any] = auxiliary_num_convs
snake_case__ : int = auxiliary_concat_input
snake_case__ : Union[str, Any] = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = version.parse("1.11" )
@property
def _lowercase ( self : List[str] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowercase ( self : Optional[int] ):
return 1e-4
| 704 |
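# Knuth-Morris-Pratt prefix function: prefix_result[i] is the length of the longest
# proper prefix of input_string[: i + 1] that is also a suffix of it.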
def prefix_function(input_string: str) -> list[int]:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 0 |
import unittest
import torch
from torch import nn
from accelerate.test_utils import require_cuda
from accelerate.utils.memory import find_executable_batch_size, release_memory
def SCREAMING_SNAKE_CASE ( ):
raise RuntimeError("CUDA out of memory." )
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Tuple ):
super().__init__()
snake_case__ : Union[str, Any] = nn.Linear(3 , 4 )
snake_case__ : List[str] = nn.BatchNormad(4 )
snake_case__ : Any = nn.Linear(4 , 5 )
def _lowercase ( self : List[str] , __A : Union[str, Any] ):
return self.lineara(self.batchnorm(self.lineara(__lowerCamelCase ) ) )
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] ):
snake_case__ : str = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__A : Dict ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
mock_training_loop_function()
self.assertListEqual(__lowerCamelCase , [1_2_8, 6_4, 3_2, 1_6, 8] )
def _lowercase ( self : Tuple ):
snake_case__ : Tuple = []
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__A : List[Any] , __A : List[str] ):
nonlocal batch_sizes
batch_sizes.append(__lowerCamelCase )
if batch_size != 8:
raise_fake_out_of_memory()
return batch_size, arga
snake_case__ : Tuple = mock_training_loop_function("hello" )
self.assertListEqual(__lowerCamelCase , [1_2_8, 6_4, 3_2, 1_6, 8] )
self.assertListEqual([bs, arga] , [8, "hello"] )
def _lowercase ( self : Dict ):
@find_executable_batch_size(starting_batch_size=0 )
def mock_training_loop_function(__A : List[str] ):
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def _lowercase ( self : Union[str, Any] ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__A : Optional[int] ):
if batch_size > 0:
raise_fake_out_of_memory()
pass
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("No executable batch size found, reached zero." , cm.exception.args[0] )
def _lowercase ( self : Dict ):
@find_executable_batch_size(starting_batch_size=1_2_8 )
def mock_training_loop_function(__A : Tuple , __A : Tuple , __A : Tuple ):
if batch_size != 8:
raise raise_fake_out_of_memory()
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function(1_2_8 , "hello" , "world" )
self.assertIn("Batch size was passed into `f`" , cm.exception.args[0] )
self.assertIn("`f(arg1='hello', arg2='world')" , cm.exception.args[0] )
def _lowercase ( self : Union[str, Any] ):
@find_executable_batch_size(starting_batch_size=1_6 )
def mock_training_loop_function(__A : List[Any] ):
raise ValueError("Oops, we had an error!" )
with self.assertRaises(__lowerCamelCase ) as cm:
mock_training_loop_function()
self.assertIn("Oops, we had an error!" , cm.exception.args[0] )
@require_cuda
def _lowercase ( self : str ):
snake_case__ : Union[str, Any] = torch.cuda.memory_allocated()
snake_case__ : List[str] = ModelForTest()
model.cuda()
self.assertGreater(torch.cuda.memory_allocated() , __lowerCamelCase )
snake_case__ : Dict = release_memory(__lowerCamelCase )
self.assertEqual(torch.cuda.memory_allocated() , __lowerCamelCase )
| 705 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
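# The device mapping is built lazily because `jaxlib.xla_extension.Device` objects
# cannot be pickled; only their string identifiers are stored on the formatter.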
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def _lowercase ( self : int , __A : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ : Optional[int] = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
snake_case__ : Any = {"dtype": jnp.intaa}
else:
snake_case__ : Tuple = {"dtype": jnp.intaa}
elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case__ : str = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case__ : Optional[Any] = np.asarray(__A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
| 25 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__lowerCamelCase : List[Any] = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
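# `replace_keys` renames checkpoint entries from the original segment-anything
# layout to the transformers SamModel layout; the regex below additionally remaps
# the numbered hypernetwork MLP layers (0 -> proj_in, 1 -> layers.0, 2 -> proj_out).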
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ):
snake_case__ : Union[str, Any] = {}
state_dict.pop("pixel_mean" , lowerCamelCase_ )
state_dict.pop("pixel_std" , lowerCamelCase_ )
snake_case__ : str = R'.*.output_hypernetworks_mlps.(\d+).layers.(\d+).*'
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case__ : int = key.replace(lowerCamelCase_ , lowerCamelCase_ )
if re.match(lowerCamelCase_ , lowerCamelCase_ ):
snake_case__ : int = int(re.match(lowerCamelCase_ , lowerCamelCase_ ).group(2 ) )
if layer_nb == 0:
snake_case__ : Optional[Any] = key.replace("layers.0" , "proj_in" )
elif layer_nb == 1:
snake_case__ : List[str] = key.replace("layers.1" , "layers.0" )
elif layer_nb == 2:
snake_case__ : List[Any] = key.replace("layers.2" , "proj_out" )
snake_case__ : Any = value
snake_case__ : int = model_state_dict[
'prompt_encoder.shared_embedding.positional_embedding'
]
return model_state_dict
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Dict , snake_case_ : List[str]="ybelkada/segment-anything" ):
snake_case__ : Dict = hf_hub_download(lowerCamelCase_ , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
snake_case__ : Tuple = SamConfig()
elif "sam_vit_l" in model_name:
snake_case__ : Union[str, Any] = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
snake_case__ : Tuple = SamConfig(
vision_config=lowerCamelCase_ , )
elif "sam_vit_h" in model_name:
snake_case__ : Optional[Any] = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
snake_case__ : str = SamConfig(
vision_config=lowerCamelCase_ , )
snake_case__ : Optional[int] = torch.load(lowerCamelCase_ , map_location="cpu" )
snake_case__ : Union[str, Any] = replace_keys(lowerCamelCase_ )
snake_case__ : int = SamImageProcessor()
snake_case__ : Dict = SamProcessor(image_processor=lowerCamelCase_ )
snake_case__ : Union[str, Any] = SamModel(lowerCamelCase_ )
hf_model.load_state_dict(lowerCamelCase_ )
snake_case__ : Dict = hf_model.to("cuda" )
snake_case__ : Dict = 'https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png'
snake_case__ : Any = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("RGB" )
snake_case__ : Dict = [[[400, 650]]]
snake_case__ : List[str] = [[1]]
snake_case__ : Any = processor(images=np.array(lowerCamelCase_ ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ : Tuple = hf_model(**lowerCamelCase_ )
snake_case__ : str = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
snake_case__ : Union[str, Any] = processor(
images=np.array(lowerCamelCase_ ) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ : List[str] = hf_model(**lowerCamelCase_ )
snake_case__ : List[Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
snake_case__ : Tuple = ((75, 275, 1725, 850),)
snake_case__ : str = processor(images=np.array(lowerCamelCase_ ) , input_boxes=lowerCamelCase_ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ : Tuple = hf_model(**lowerCamelCase_ )
snake_case__ : Union[str, Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
snake_case__ : str = [[[400, 650], [800, 650]]]
snake_case__ : Optional[int] = [[1, 1]]
snake_case__ : Any = processor(
images=np.array(lowerCamelCase_ ) , input_points=lowerCamelCase_ , input_labels=lowerCamelCase_ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ : List[str] = hf_model(**lowerCamelCase_ )
snake_case__ : Optional[int] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
__lowerCamelCase : Union[str, Any] = ["""sam_vit_b_01ec64""", """sam_vit_h_4b8939""", """sam_vit_l_0b3195"""]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
__lowerCamelCase : List[str] = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
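# Lazy module layout: framework-specific submodules are only imported on first
# access, and backends that are unavailable simply drop out of the structure.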
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 0 |
import re
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
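    """Return the complementary DNA strand.

    Docstring with doctests added here (a minimal sketch assuming standard
    Watson-Crick A<->T / C<->G pairing) so the doctest.testmod() call below
    actually exercises this function.

    >>> SCREAMING_SNAKE_CASE("ATCG")
    'TAGC'
    >>> SCREAMING_SNAKE_CASE("GGCTA")
    'CCGAT'
    """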
    if len(re.findall("[ATCG]" , snake_case_ ) ) != len(snake_case_ ):
        raise ValueError("Invalid Strand" )
    return snake_case_.translate(str.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 707 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def test_gelu_versions( self ):
        x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
        torch_builtin = get_activation("gelu" )
        self.assertTrue(torch.allclose(gelu_python(x ) , torch_builtin(x ) ) )
        self.assertFalse(torch.allclose(gelu_python(x ) , gelu_new(x ) ) )
    def test_gelu_10( self ):
        x = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
        torch_builtin = get_activation("gelu" )
        geluaa = get_activation("gelu_10" )
        y_gelu = torch_builtin(x )
        y_gelu_aa = geluaa(x )
        clipped_mask = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
        self.assertTrue(torch.max(y_gelu_aa ).item() == 1_0.0 )
        self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
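    # gelu_10 behaves like gelu with its output clipped at 10, hence the two
    # checks above: the maximum equals the clip value, and everything below the
    # clip matches the un-clipped gelu.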
    def test_get_activation( self ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
        with self.assertRaises(KeyError ):
            get_activation("bogus" )
        with self.assertRaises(KeyError ):
            get_activation(None )
    def test_activations_are_distinct_objects( self ):
        acta = get_activation("gelu" )
        acta.a = 1
        actb = get_activation("gelu" )
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(AttributeError ):
            _ = actb.a
| 25 | 0 |
import json
import os
import unittest
from transformers import OpenAIGPTTokenizer, OpenAIGPTTokenizerFast
from transformers.models.openai.tokenization_openai import VOCAB_FILES_NAMES
from transformers.testing_utils import require_ftfy, require_spacy, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class OpenAIGPTTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
"""simple docstring"""
    tokenizer_class = OpenAIGPTTokenizer
    rust_tokenizer_class = OpenAIGPTTokenizerFast
    test_rust_tokenizer = True
    test_seq2seq = False
    def setUp( self ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
        vocab = [
'''l''',
'''o''',
'''w''',
'''e''',
'''r''',
'''s''',
'''t''',
'''i''',
'''d''',
'''n''',
'''w</w>''',
'''r</w>''',
'''t</w>''',
'''lo''',
'''low''',
'''er</w>''',
'''low</w>''',
'''lowest</w>''',
'''newer</w>''',
'''wider</w>''',
'''<unk>''',
]
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ['''#version: 0.2''', '''l o''', '''lo w''', '''e r</w>''', '''''']
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" ) as fp:
            fp.write(json.dumps(vocab_tokens ) )
        with open(self.merges_file , "w" ) as fp:
            fp.write("\n".join(merges ) )
    def get_input_output_texts( self , tokenizer ):
        return "lower newer", "lower newer"
    def test_full_tokenizer( self ):
        tokenizer = OpenAIGPTTokenizer(self.vocab_file , self.merges_file )
        text = '''lower'''
        bpe_tokens = ['''low''', '''er</w>''']
        tokens = tokenizer.tokenize(text )
        self.assertListEqual(tokens , bpe_tokens )
        input_tokens = tokens + ['''<unk>''']
        input_bpe_tokens = [1_4, 1_5, 2_0]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens ) , input_bpe_tokens )
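        # With the toy vocab above, "low" sits at index 14, "er</w>" at 15 and
        # "<unk>" at 20, which is exactly the id list asserted here.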
    def test_padding( self , max_length=1_5 ):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name , **kwargs )
                # Simple input
                s = '''This is a simple input'''
                s2 = ['''This is a simple input 1''', '''This is a simple input 2''']
                p = ('''This is a simple input''', '''This is a pair''')
                p2 = [
                    ('''This is a simple input 1''', '''This is a simple input 2'''),
                    ('''This is a simple pair 1''', '''This is a simple pair 2'''),
                ]
                # Simple input tests
                self.assertRaises(ValueError , tokenizer_r.encode , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , s , max_length=max_length , padding="max_length" )
                # Simple input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , s2 , max_length=max_length , padding="max_length" , )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(ValueError , tokenizer_r.encode_plus , p , max_length=max_length , padding="max_length" )
                # Pair input
                self.assertRaises(
                    ValueError , tokenizer_r.batch_encode_plus , p2 , max_length=max_length , padding="max_length" , )
def _lowercase ( self : Dict ):
pass
@require_ftfy
@require_spacy
@require_tokenizers
class OpenAIGPTTokenizationTestWithSpacy( OpenAIGPTTokenizationTest ):
"""simple docstring"""
pass
| 708 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
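# Keys in MAPPING may contain a "*" placeholder standing for the encoder layer
# index; it is filled in from the fairseq parameter name while walking the
# state dict below.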
def set_recursively( hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split("." ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights( fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == "group" , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split("." )[-2]
                        mapped_key = mapped_key.replace("*" , layer_index )
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = "weight"
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer( full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split("conv_layers." )[-1]
    items = name.split("." )
    layer_id = int(items[0] )
    type_id = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint( checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ""
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 25 | 0 |
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def main():
    parser = HfArgumentParser(TensorFlowBenchmarkArguments )
    benchmark_args = parser.parse_args_into_dataclasses()[0]
    benchmark = TensorFlowBenchmark(args=benchmark_args )
    try:
        benchmark_args = parser.parse_args_into_dataclasses()[0]
    except ValueError as e:
        arg_error_msg = "Arg --no_{0} is no longer used, please use --no-{0} instead."
        begin_error_msg = " ".join(str(e ).split(" " )[:-1] )
        full_error_msg = ""
        depreciated_args = eval(str(e ).split(" " )[-1] )
        wrong_args = []
        for arg in depreciated_args:
            # arg[2:] removes '--'
            if arg[2:] in TensorFlowBenchmark.deprecated_args:
                # arg[5:] removes '--no_'
                full_error_msg += arg_error_msg.format(arg[5:] )
            else:
                wrong_args.append(arg )
        if len(wrong_args ) > 0:
            full_error_msg = full_error_msg + begin_error_msg + str(wrong_args )
        raise ValueError(full_error_msg )
    benchmark.run()
if __name__ == "__main__":
main()
| 709 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict( config , input_ids , decoder_input_ids , attention_mask=None , decoder_attention_mask=None , head_mask=None , decoder_head_mask=None , cross_attn_head_mask=None , ):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id )
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id )
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=torch_device )
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=torch_device )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class MaMaaaModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=1_3 , seq_length=7 , is_training=True , use_labels=False , vocab_size=9_9 , hidden_size=1_6 , num_hidden_layers=2 , num_attention_heads=4 , intermediate_size=4 , hidden_act="relu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , max_position_embeddings=2_0 , eos_token_id=2 , pad_token_id=1 , bos_token_id=0 , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
def _lowercase ( self : Tuple ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
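        # e.g. with pad_token_id = 1, a sampled row [0, 1, 3, 2] is clamped to
        # [2, 2, 3, 2] below, so no pad ids remain inside the sequences.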
        input_ids = input_ids.clamp(self.pad_token_id + 1 )
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1 )
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config , input_ids , decoder_input_ids )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
        config, inputs_dict = self.prepare_config_and_inputs()
return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs( self , config , inputs_dict ):
        model = MaMaaaModel(config=config ).get_decoder().to(torch_device ).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]
        # first forward pass
        outputs = model(input_ids , attention_mask=attention_mask , head_mask=head_mask , use_cache=True )
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3) , config.vocab_size )
        next_attn_mask = ids_tensor((self.batch_size, 3) , 2 )
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens] , dim=-1 )
        next_attention_mask = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
        output_from_no_past = model(next_input_ids , attention_mask=next_attention_mask )["last_hidden_state"]
        output_from_past = model(next_tokens , attention_mask=next_attention_mask , past_key_values=past_key_values )[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,) , output_from_past.shape[-1] ).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice , output_from_no_past_slice , atol=1e-2 ) )
    def check_encoder_decoder_model_standalone( self , config , inputs_dict ):
        model = MaMaaaModel(config=config ).to(torch_device ).eval()
        outputs = model(**inputs_dict )
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname )
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname ).to(torch_device )
        encoder_last_hidden_state_2 = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
            0
        ]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3 )
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname )
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname ).to(torch_device )
        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=encoder_last_hidden_state , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class MaMaaaModelTest( ModelTesterMixin , GenerationTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    """simple docstring"""

    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self , pipeline_test_casse_name , config_class , model_architecture , tokenizer_name , processor_name ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp( self ):
        self.model_tester = MaMaaaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MaMaaaConfig )
    def test_config( self ):
self.config_tester.run_common_tests()
    def test_save_load_strict( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname )
                model2, info = model_class.from_pretrained(tmpdirname , output_loading_info=True )
            self.assertEqual(info["missing_keys"] , [] )
    def test_decoder_model_past_with_large_inputs( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs )
    def test_encoder_decoder_model_standalone( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs )
    def test_inputs_embeds( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict , model_class ) )
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids" , input_ids )
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids" , None )
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids )
            else:
                inputs["inputs_embeds"] = wte(input_ids )
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids )
            with torch.no_grad():
                model(**inputs )[0]
    def test_generate_fp16( self ):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1 ).to(torch_device )
        model = MaMaaaForConditionalGeneration(config ).eval().to(torch_device )
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids , attention_mask=attention_mask )
        model.generate(num_beams=4 , do_sample=True , early_stopping=False , num_return_sequences=3 )
def _long_tensor( tok_lst ):
    return torch.tensor(tok_lst , dtype=torch.long , device=torch_device )
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests( unittest.TestCase ):
"""simple docstring"""
@cached_property
    def default_tokenizer( self ):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
    def test_inference_no_head( self ):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(torch_device )
        input_ids = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        decoder_input_ids = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 1_1, 1_0_2_4) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_inference_head( self ):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(torch_device )
        # change to intended input
        input_ids = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
        decoder_input_ids = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config , input_ids , decoder_input_ids )
        with torch.no_grad():
            output = model(**inputs_dict )[0]
        expected_shape = torch.Size((1, 1_1, model.config.vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=torch_device )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=TOLERANCE ) )
    def test_seq_to_seq_generation( self ):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(torch_device )
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr , padding=True , return_tensors="pt" )
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device ) , attention_mask=dct["attention_mask"].to(torch_device ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist() , clean_up_tokenization_spaces=True , skip_special_tokens=True )
        assert generated == expected_en
| 25 | 0 |
def solution():
    total = 0
    for i in range(1 , 1001 ):
        total += i**i
    return str(total )[-10:]
if __name__ == "__main__":
print(solution())
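# A lighter-weight variant (assumption: only the last ten digits are needed, so
# the sum can be accumulated modulo 10**10):
#   total = sum(pow(i, i, 10**10) for i in range(1, 1001)) % 10**10
#   answer = str(total).zfill(10)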
| 710 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order( df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
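    # (100 rows x 8 bytes per row) / 16 bytes per shard = 50 partitions, checked below.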
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df , partition_order )  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
        generator_mock.shuffle = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
    shard_it_a = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
    expected_row_ids_and_row_dicts_a = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_a ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
    shard_it_b = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_b.n_shards == 2
    expected_row_ids_and_row_dicts_b = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_b ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_b[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 25 | 0 |
import json
from typing import TYPE_CHECKING, List, Optional, Tuple
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_blenderbot import BlenderbotTokenizer
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase : Any = logging.get_logger(__name__)
__lowerCamelCase : Any = {
'vocab_file': 'vocab.json',
'merges_file': 'merges.txt',
'tokenizer_config_file': 'tokenizer_config.json',
}
__lowerCamelCase : List[str] = {
'vocab_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json'},
'merges_file': {'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt'},
'tokenizer_config_file': {
'facebook/blenderbot-3B': 'https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json'
},
}
__lowerCamelCase : List[Any] = {'facebook/blenderbot-3B': 128}
class SCREAMING_SNAKE_CASE__ ( PreTrainedTokenizerFast ):
"""simple docstring"""
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    slow_tokenizer_class = BlenderbotTokenizer
    def __init__( self , vocab_file=None , merges_file=None , tokenizer_file=None , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , trim_offsets=True , **kwargs , ):
        super().__init__(
            vocab_file , merges_file , tokenizer_file=tokenizer_file , errors=errors , bos_token=bos_token , eos_token=eos_token , sep_token=sep_token , cls_token=cls_token , unk_token=unk_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , trim_offsets=trim_offsets , **kwargs , )
        pre_tok_state = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
        if pre_tok_state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
            pre_tok_class = getattr(pre_tokenizers , pre_tok_state.pop("type" ) )
            pre_tok_state["add_prefix_space"] = add_prefix_space
            self.backend_tokenizer.pre_tokenizer = pre_tok_class(**pre_tok_state )
        self.add_prefix_space = add_prefix_space
        tokenizer_component = "post_processor"
        tokenizer_component_instance = getattr(self.backend_tokenizer , tokenizer_component , None )
        if tokenizer_component_instance:
            state = json.loads(tokenizer_component_instance.__getstate__() )
            # The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
            if "sep" in state:
                state["sep"] = tuple(state["sep"] )
            if "cls" in state:
                state["cls"] = tuple(state["cls"] )
            changes_to_apply = False
            if state.get("add_prefix_space" , add_prefix_space ) != add_prefix_space:
                state["add_prefix_space"] = add_prefix_space
                changes_to_apply = True
            if state.get("trim_offsets" , trim_offsets ) != trim_offsets:
                state["trim_offsets"] = trim_offsets
                changes_to_apply = True
            if changes_to_apply:
                component_class = getattr(processors , state.pop("type" ) )
                new_value = component_class(**state )
                setattr(self.backend_tokenizer , tokenizer_component , new_value )
@property
# Copied from transformers.models.roberta.tokenization_roberta_fast.RobertaTokenizerFast.mask_token with Roberta->Blenderbot, RoBERTa->Blenderbot
    def mask_token( self ):
        if self._mask_token is None:
            if self.verbose:
                logger.error("Using mask_token, but it is not set yet." )
            return None
        return str(self._mask_token )
@mask_token.setter
    def mask_token( self , value ):
        value = AddedToken(value , lstrip=True , rstrip=False ) if isinstance(value , str ) else value
        self._mask_token = value
    def _batch_encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._batch_encode_plus(*args , **kwargs )
    def _encode_plus( self , *args , **kwargs ):
        is_split_into_words = kwargs.get("is_split_into_words" , False )
        assert self.add_prefix_space or not is_split_into_words, (
            f'''You need to instantiate {self.__class__.__name__} with add_prefix_space=True '''
            "to use it with pretokenized inputs."
        )
        return super()._encode_plus(*args , **kwargs )
    def save_vocabulary( self , save_directory: str , filename_prefix: Optional[str] = None ):
        files = self._tokenizer.model.save(save_directory , name=filename_prefix )
        return tuple(files )
    def create_token_type_ids_from_sequences( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def build_inputs_with_special_tokens( self , token_ids_0: List[int] , token_ids_1: Optional[List[int]] = None ):
        return token_ids_0 + [self.eos_token_id]
def _lowercase ( self : List[str] , __A : "Conversation" ):
snake_case__ : Dict = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to space prefix as it's being done within blenderbot
inputs.append(" " + text )
else:
# Generated responses should contain them already.
                inputs.append(text )
        full_string = " ".join(inputs )
        input_ids = self.encode(full_string )
        if len(input_ids ) > self.model_max_length:
            input_ids = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 0 |
from unittest import TestCase
from datasets import Sequence, Value
from datasets.arrow_dataset import Dataset
class SCREAMING_SNAKE_CASE__ ( TestCase ):
"""simple docstring"""
    def _create_example_records( self ):
return [
{"col_1": 3, "col_2": "a"},
{"col_1": 2, "col_2": "b"},
{"col_1": 1, "col_2": "c"},
{"col_1": 0, "col_2": "d"},
]
def _lowercase ( self : Tuple ):
snake_case__ : Optional[int] = {"""col_1""": [3, 2, 1, 0], """col_2""": ["""a""", """b""", """c""", """d"""]}
return Dataset.from_dict(_a )
    def test_create( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        self.assertListEqual(dset.column_names , ["col_1", "col_2"] )
        for i, r in enumerate(dset ):
            self.assertDictEqual(r , example_records[i] )
    def test_list_dict_equivalent( self ):
        example_records = self._create_example_records()
        dset = Dataset.from_list(example_records )
        dset_from_dict = Dataset.from_dict({k: [r[k] for r in example_records] for k in example_records[0]} )
self.assertEqual(dset.info , dset_from_dict.info )
    def test_uneven_records( self ):  # checks what happens with missing columns
        list_of_dicts = [{"""col_1""": 1}, {"""col_2""": """x"""}]
        dset = Dataset.from_list(list_of_dicts )
self.assertDictEqual(dset[0] , {"col_1": 1} )
self.assertDictEqual(dset[1] , {"col_1": None} ) # NB: first record is used for columns
    def test_variable_list_records( self ):  # checks if the type can be inferred from the second record
        list_of_dicts = [{"""col_1""": []}, {"""col_1""": [1, 2]}]
        dset = Dataset.from_list(list_of_dicts )
self.assertEqual(dset.info.features["col_1"] , Sequence(Value("int64" ) ) )
    def test_create_empty( self ):
        dset = Dataset.from_list([] )
        self.assertEqual(len(dset ) , 0 )
self.assertListEqual(dset.column_names , [] )
| 712 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
    from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 25 | 0 |
import re
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
    if len(re.findall("[ATCG]" , snake_case_ ) ) != len(snake_case_ ):
        raise ValueError("Invalid Strand" )
    return snake_case_.translate(str.maketrans("ATCG" , "TAGC" ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 713 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling( data: dict ):
    return (data["data"], data["target"])
def xgboost( features: np.ndarray , target: np.ndarray ):
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main():
    data = load_iris()
    features, targets = data_handling(data )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = data["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25 | 0 |
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
    octets = [int(octet ) for octet in snake_case_.split("." ) if octet.isdigit()]
    return len(octets ) == 4 and all(0 <= octet <= 255 for octet in octets )
if __name__ == "__main__":
__lowerCamelCase : Any = input().strip()
__lowerCamelCase : List[Any] = """valid""" if is_ip_va_address_valid(ip) else """invalid"""
print(f"{ip} is a {valid_or_invalid} IP v4 address.")
| 714 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results( result: Dataset , args: Dict[str, str] ):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
    # load metric
    wer = load_metric("wer" )
    cer = load_metric("cer" )
    # compute metrics
    wer_result = wer.compute(references=result["target"] , predictions=result["prediction"] )
    cer_result = cer.compute(references=result["target"] , predictions=result["prediction"] )
    # print & log results
    result_str = F'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str )
    with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F'''log_{dataset_id}_predictions.txt'''
        target_file = F'''log_{dataset_id}_targets.txt'''
        with open(pred_file , "w" ) as p, open(target_file , "w" ) as t:
            # mapping function to write output
            def write_to_file( batch , i ):
                p.write(F'''{i}''' + "\n" )
                p.write(batch["prediction"] + "\n" )
                t.write(F'''{i}''' + "\n" )
                t.write(batch["target"] + "\n" )
            result.map(write_to_file , with_indices=True )
def normalize_text( text: str ) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , "" , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t ) )
    return text
def main( args ):
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio" , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred( batch ):
        prediction = asr(
            batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with ๐ค Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with ๐ค Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
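    # Example invocation (hypothetical model/dataset ids, shown for illustration only):
    #   python eval.py --model_id hf-test/xls-r-dummy \
    #       --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs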
| 25 | 0 |
import socket
def main():
    sock = socket.socket(socket.AF_INET , socket.SOCK_STREAM )
    host = socket.gethostname()
    port = 12312
sock.connect((host, port) )
sock.send(b"Hello server!" )
with open("Received_file" , "wb" ) as out_file:
print("File opened" )
print("Receiving data..." )
while True:
            data = sock.recv(1024 )
            if not data:
                break
            out_file.write(data )
print("Successfully received the file" )
sock.close()
print("Connection closed" )
if __name__ == "__main__":
main()
| 715 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class SCREAMING_SNAKE_CASE__ ( TaskTemplate ):
"""simple docstring"""
a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a_ = Features({"text": Value("string" )} )
a_ = Features({"labels": ClassLabel} )
a_ = "text"
a_ = "labels"
def _lowercase ( self : Tuple , __A : List[Any] ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
snake_case__ : Any = copy.deepcopy(self )
snake_case__ : Optional[Any] = self.label_schema.copy()
snake_case__ : List[str] = features[self.label_column]
snake_case__ : Dict = label_schema
return task_template
@property
def _lowercase ( self : Tuple ):
return {
self.text_column: "text",
self.label_column: "labels",
}
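# Usage sketch (hypothetical features, for illustration only): aligning the
# template swaps the generic ClassLabel for the dataset's concrete labels.
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   aligned = TextClassification().align_with_features(features)
#   assert aligned.label_schema["labels"].names == ["neg", "pos"]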
| 25 | 0 |
def is_automorphic_number(number: int ) -> bool:
    if not isinstance(number , int ):
        msg = f'''Input value of [number={number}] must be an integer'''
        raise TypeError(msg )
    if number < 0:
        return False
    number_square = number * number
    while number > 0:
        if number % 10 != number_square % 10:
            return False
        number //= 10
        number_square //= 10
    return True
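# Illustrative checks: a number is automorphic when its square ends in the
# number itself, e.g. 5 * 5 = 25 and 76 * 76 = 5776, while 7 * 7 = 49 is not.
#   assert is_automorphic_number(5) and is_automorphic_number(76)
#   assert not is_automorphic_number(7)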
if __name__ == "__main__":
import doctest
doctest.testmod()
| 716 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip_vision_model"
    def __init__( self , hidden_size=1408 , intermediate_size=6144 , num_hidden_layers=39 , num_attention_heads=16 , image_size=224 , patch_size=14 , hidden_act="gelu" , layer_norm_eps=1e-6 , attention_dropout=0.0 , initializer_range=1e-10 , qkv_bias=True , **kwargs , ):
        super().__init__(**kwargs )
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.patch_size = patch_size
        self.image_size = image_size
        self.initializer_range = initializer_range
        self.attention_dropout = attention_dropout
        self.layer_norm_eps = layer_norm_eps
        self.hidden_act = hidden_act
        self.qkv_bias = qkv_bias
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipQFormerConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip_qformer"
    def __init__( self , vocab_size=30522 , hidden_size=768 , num_hidden_layers=12 , num_attention_heads=12 , intermediate_size=3072 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , cross_attention_frequency=2 , encoder_hidden_size=1408 , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.cross_attention_frequency = cross_attention_frequency
        self.encoder_hidden_size = encoder_hidden_size
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path: Union[str, os.PathLike] , **kwargs ):
        cls._set_token_in_kwargs(kwargs )
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type" ) == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
                f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
        return cls.from_dict(config_dict , **kwargs )
class InstructBlipConfig(PretrainedConfig ):
    """simple docstring"""
    model_type = "instructblip"
    is_composition = True
    def __init__( self , vision_config=None , qformer_config=None , text_config=None , num_query_tokens=32 , **kwargs ):
        super().__init__(**kwargs )
        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
        if qformer_config is None:
            qformer_config = {}
            logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
        self.vision_config = InstructBlipVisionConfig(**vision_config )
        self.qformer_config = InstructBlipQFormerConfig(**qformer_config )
        text_model_type = text_config["model_type"] if "model_type" in text_config else "opt"
        self.text_config = CONFIG_MAPPING[text_model_type](**text_config )
        self.tie_word_embeddings = self.text_config.tie_word_embeddings
        self.is_encoder_decoder = self.text_config.is_encoder_decoder
        self.num_query_tokens = num_query_tokens
        self.qformer_config.encoder_hidden_size = self.vision_config.hidden_size
        self.use_decoder_only_language_model = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
        self.initializer_factor = 1.0
        self.initializer_range = 0.02
    @classmethod
    def from_vision_qformer_text_configs( cls , vision_config: InstructBlipVisionConfig , qformer_config: InstructBlipQFormerConfig , text_config: PretrainedConfig , **kwargs , ):
        return cls(
            vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **kwargs , )
    def to_dict( self ):
        output = copy.deepcopy(self.__dict__ )
        output["vision_config"] = self.vision_config.to_dict()
        output["qformer_config"] = self.qformer_config.to_dict()
        output["text_config"] = self.text_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
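# Composition sketch (illustrative values only, assuming the classes above):
#   vision = InstructBlipVisionConfig()
#   qformer = InstructBlipQFormerConfig()
#   text = CONFIG_MAPPING["opt"]()
#   config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
#   assert config.to_dict()["model_type"] == "instructblip"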
| 25 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
is_vision_available,
)
__lowerCamelCase : Tuple = {"""configuration_vit""": ["""VIT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTConfig""", """ViTOnnxConfig"""]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = ["""ViTFeatureExtractor"""]
__lowerCamelCase : List[str] = ["""ViTImageProcessor"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vit"] = [
"""VIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTForImageClassification""",
"""ViTForMaskedImageModeling""",
"""ViTModel""",
"""ViTPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vit"] = [
"""TFViTForImageClassification""",
"""TFViTModel""",
"""TFViTPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vit"] = [
"""FlaxViTForImageClassification""",
"""FlaxViTModel""",
"""FlaxViTPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit import VIT_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTConfig, ViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_vit import ViTFeatureExtractor
from .image_processing_vit import ViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit import (
VIT_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTForImageClassification,
ViTForMaskedImageModeling,
ViTModel,
ViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vit import TFViTForImageClassification, TFViTModel, TFViTPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vit import FlaxViTForImageClassification, FlaxViTModel, FlaxViTPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
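    # With the lazy module installed in sys.modules, a statement such as
    # `from transformers.models.vit import ViTModel` only imports the heavy
    # torch/tf/flax submodules on first attribute access.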
| 717 |
def gnome_sort(lst: list ):
    if len(lst ) <= 1:
        return lst
    i = 1
    while i < len(lst ):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
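# Illustrative checks: gnome sort is O(n^2) in the worst case but adaptive, so
# already-sorted input costs only a single O(n) pass.
#   assert gnome_sort([3, 1, 2]) == [1, 2, 3]
#   assert gnome_sort([]) == []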
if __name__ == "__main__":
    user_input = input("""Enter numbers separated by a comma:\n""").strip()
    unsorted = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 25 | 0 |
import numpy as np
from nltk.translate import meteor_score
import datasets
from datasets.config import importlib_metadata, version
NLTK_VERSION = version.parse(importlib_metadata.version("""nltk"""))
if NLTK_VERSION >= version.Version("""3.6.4"""):
from nltk import word_tokenize
__lowerCamelCase : Dict = "\\n@inproceedings{banarjee2005,\n title = {{METEOR}: An Automatic Metric for {MT} Evaluation with Improved Correlation with Human Judgments},\n author = {Banerjee, Satanjeev and Lavie, Alon},\n booktitle = {Proceedings of the {ACL} Workshop on Intrinsic and Extrinsic Evaluation Measures for Machine Translation and/or Summarization},\n month = jun,\n year = {2005},\n address = {Ann Arbor, Michigan},\n publisher = {Association for Computational Linguistics},\n url = {https://www.aclweb.org/anthology/W05-0909},\n pages = {65--72},\n}\n"
__lowerCamelCase : Optional[int] = "\\nMETEOR, an automatic metric for machine translation evaluation\nthat is based on a generalized concept of unigram matching between the\nmachine-produced translation and human-produced reference translations.\nUnigrams can be matched based on their surface forms, stemmed forms,\nand meanings; furthermore, METEOR can be easily extended to include more\nadvanced matching strategies. Once all generalized unigram matches\nbetween the two strings have been found, METEOR computes a score for\nthis matching using a combination of unigram-precision, unigram-recall, and\na measure of fragmentation that is designed to directly capture how\nwell-ordered the matched words in the machine translation are in relation\nto the reference.\n\nMETEOR gets an R correlation value of 0.347 with human evaluation on the Arabic\ndata and 0.331 on the Chinese data. This is shown to be an improvement on\nusing simply unigram-precision, unigram-recall and their harmonic F1\ncombination.\n"
__lowerCamelCase : Tuple = "\nComputes METEOR score of translated segments against one or more references.\nArgs:\n predictions: list of predictions to score. Each prediction\n should be a string with tokens separated by spaces.\n references: list of reference for each prediction. Each\n reference should be a string with tokens separated by spaces.\n alpha: Parameter for controlling relative weights of precision and recall. default: 0.9\n beta: Parameter for controlling shape of penalty as a function of fragmentation. default: 3\n gamma: Relative weight assigned to fragmentation penalty. default: 0.5\nReturns:\n 'meteor': meteor score.\nExamples:\n\n >>> meteor = datasets.load_metric('meteor')\n >>> predictions = [\"It is a guide to action which ensures that the military always obeys the commands of the party\"]\n >>> references = [\"It is a guide to action that ensures that the military will forever heed Party commands\"]\n >>> results = meteor.compute(predictions=predictions, references=references)\n >>> print(round(results[\"meteor\"], 4))\n 0.6944\n"
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Meteor(datasets.Metric ):
    """simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/nltk/nltk/blob/develop/nltk/translate/meteor_score.py"] , reference_urls=[
"https://www.nltk.org/api/nltk.translate.html#module-nltk.translate.meteor_score",
"https://en.wikipedia.org/wiki/METEOR",
] , )
    def _download_and_prepare( self , dl_manager ):
import nltk
nltk.download("wordnet" )
if NLTK_VERSION >= version.Version("3.6.5" ):
nltk.download("punkt" )
if NLTK_VERSION >= version.Version("3.6.6" ):
nltk.download("omw-1.4" )
    def _compute( self , predictions , references , alpha=0.9 , beta=3 , gamma=0.5 ):
        if NLTK_VERSION >= version.Version("3.6.5" ):
            scores = [
                meteor_score.single_meteor_score(
                    word_tokenize(ref ) , word_tokenize(pred ) , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        else:
            scores = [
                meteor_score.single_meteor_score(ref , pred , alpha=alpha , beta=beta , gamma=gamma )
                for ref, pred in zip(references , predictions )
            ]
        return {"meteor": np.mean(scores )}
| 718 |
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    """simple docstring"""
    def __init__( self , pos_x: int , pos_y: int , goal_x: int , goal_y: int , parent: Node | None ):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    """simple docstring"""
    def __init__( self , start: tuple[int, int] , goal: tuple[int, int] ):
        self.start = Node(start[1] , start[0] , goal[1] , goal[0] , None )
        self.target = Node(goal[1] , goal[0] , goal[1] , goal[0] , None )
        self.node_queue = [self.start]
        self.reached = False
    def search( self ) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0 )
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node )
            successors = self.get_successors(current_node )
            for node in successors:
                self.node_queue.append(node )
        if not self.reached:
            return [self.start.pos]
        return None
    def get_successors( self , parent: Node ) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x , pos_y , self.target.pos_y , self.target.pos_x , parent ) )
        return successors
    def retrace_path( self , node: Node | None ) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x) )
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    """simple docstring"""
    def __init__( self , start , goal ):
        self.fwd_bfs = BreadthFirstSearch(start , goal )
        self.bwd_bfs = BreadthFirstSearch(goal , start )
        self.reached = False
    def search( self ) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0 )
            current_bwd_node = self.bwd_bfs.node_queue.pop(0 )
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node , current_bwd_node )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node ),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node ),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node )
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None
    def retrace_bidirectional_path( self , fwd_node: Node , bwd_node: Node ) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node )
        bwd_path = self.bwd_bfs.retrace_path(bwd_node )
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
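# Why bidirectional: plain BFS visits on the order of b^d nodes for branching
# factor b and solution depth d, while two frontiers that meet in the middle
# visit roughly 2 * b^(d / 2), an exponential saving on large grids.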
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
    for elem in grid:
        print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
    print("""Unidirectional BFS computation time : """, bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
    print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 25 | 0 |
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def generate_n_pairs(context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , ):
    set_seed(3 )
    # generate train_data and objective_set
    train_data, objective_set = generate_datasets(
        context_len , data_file , number=size_objective_set , min_len=1026 , trim=trim )
    # keeps model same across runs
    set_seed(4 )
    # model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
    # can we train on GPU?
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    # load pretrained model
    model = load_gpta("gpt2" ).to(device )
    print("computing perplexity on objective set" )
    orig_perp = compute_perplexity(model , objective_set , context_len ).item()
    print("perplexity on objective set:" , orig_perp )
    # collect igf pairs and save to file demo.jbl
    collect_objective_set(model , orig_perp , context_len , train_data , objective_set , max_steps , device , igf_data_file )
    # clean up, delete model and data we don't need anymore
    del model, train_data, objective_set
    torch.cuda.empty_cache()
def training_secondary_learner(secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , ):
    set_seed(42 )
    # Load pre-trained model
    model = GPTaLMHeadModel.from_pretrained("gpt2" )
    # Initialize secondary learner to use embedding weights of model
    secondary_learner = SecondaryLearner(model )
    # Train secondary learner
    secondary_learner = train_secondary_learner(
        secondary_learner , secondary_learner_train_data , max_epochs=secondary_learner_max_epochs , batch_size=secondary_learner_batch_size , eval_freq=100 , igf_model_path=igf_model_path , )
    del model, secondary_learner_train_data
    torch.cuda.empty_cache()
    return secondary_learner
def finetune(model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=None , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , ):
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu" )
    train_sampler = RandomSampler(train_dataset )
    train_dataloader = DataLoader(train_dataset , sampler=train_sampler )
    num_train_epochs = max_steps // (len(train_dataset )) + 1
    global_step = 0
    context = torch.zeros((1, context_len) , dtype=torch.long , device=device )
    model, lm_optimizer, lm_scheduler = recopy_model(model , device , max_steps )
    model.train()
    if secondary_learner is not None:
        secondary_learner.to(device )
        secondary_learner.eval()
    contexts = []
    examples = 0
    observed_qs = []
    test_perps = []
    # Compute the performance of the transformer model at the beginning
    real_perp = compute_perplexity(model , test_dataset , context_len )
    test_perps.append(real_perp )
    print("Test perplexity, step" , global_step , ":" , real_perp )
    for epoch in range(int(num_train_epochs ) ):
        for step, example in enumerate(train_dataloader ):
            torch.cuda.empty_cache()
            start = random.randint(0 , example.size(2 ) - context_len - 1 )
            context[0, :] = example[0, 0, start : start + context_len]
            lm_optimizer.zero_grad()
            outputs = model(context , labels=context )
            do_backprop = True
            if secondary_learner is not None:
                predicted_q = secondary_learner.forward(
                    torch.tensor(context , dtype=torch.long , device=device ).unsqueeze(0 ) )[0].item()
                observed_qs.append(float(predicted_q ) )
                # Here we implement the simple non-constant threshold for the predicted IG(X) value
                # We will decay the selectivity of our secondary learner filter from
                # 1 standard deviation above average to 1 below average after 10 batches.
                if global_step == 10:
                    threshold = -1
                if predicted_q < threshold:
                    do_backprop = False
            # If we passed the filter, add the context to the batch!
            if do_backprop:
                contexts.append(np.array(context.cpu() ) )
                lm_loss = outputs[0]
                lm_loss.backward()
                examples += 1
            del outputs
            # Once the batch is filled with enough contexts, backprop on the batch.
            if examples == batch_size:
                torch.cuda.empty_cache()
                examples = 0
                # Do LM backprop
                torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
                lm_optimizer.step()
                lm_scheduler.step()  # Update learning rate schedule
                global_step += 1
                # Compute the performance of the transformer model at this batch
                if global_step % eval_interval == 0:
                    real_perp = compute_perplexity(model , test_dataset , context_len )
                    test_perps.append(real_perp )
                    print("Test perplexity, step" , global_step , ":" , real_perp )
            # Break out of the loop after 60 batches
            if max_steps > 0 and global_step > 60:
                break
        if max_steps > 0 and global_step > 60:
            break
    # save finetuned transformer model
    torch.save(model.state_dict() , finetuned_model_name )
    torch.cuda.empty_cache()
    # Do some cleaning up so we can reinitialize for the next run of this function
    del lm_optimizer
    del lm_scheduler
    return model
def main():
    parser = argparse.ArgumentParser(description="Fine-tune a transformer model with IGF on a language modeling task" )
    # Required parameters
    parser.add_argument(
        "--data_dir" , default=None , type=str , required=True , help="The input data dir. Should contain data files for WikiText." , )
    parser.add_argument(
        "--model_name_or_path" , default=None , type=str , required=True , help="Path to pretrained model or model identifier from huggingface.co/models" , )
    parser.add_argument(
        "--data_file" , type=str , default=None , help=(
            "A jbl file containing tokenized data which can be split as objective dataset, "
            "train_dataset and test_dataset."
        ) , )
    parser.add_argument(
        "--igf_data_file" , type=str , default=None , help="A jbl file containing the context and information gain pairs to train secondary learner." , )
    parser.add_argument(
        "--output_dir" , default=None , type=str , required=True , help="The output directory where the final fine-tuned model is stored." , )
    parser.add_argument(
        "--tokenizer_name" , default=None , type=str , help="Pretrained tokenizer name or path if not the same as model_name" , )
    parser.add_argument("--seed" , type=int , default=None , help="A seed for reproducible training." )
    parser.add_argument(
        "--context_len" , default=32 , type=int , help=(
            "The maximum total input sequence length after tokenization. Sequences longer "
            "than this will be truncated, sequences shorter will be padded."
        ) , )
    parser.add_argument(
        "--size_objective_set" , default=100 , type=int , help="number of articles that are long enough to be used as our objective set" , )
    parser.add_argument(
        "--eval_freq" , default=100 , type=int , help="secondary model evaluation is triggered at eval_freq" )
    parser.add_argument("--max_steps" , default=1000 , type=int , help="To calculate training epochs" )
    parser.add_argument(
        "--secondary_learner_batch_size" , default=128 , type=int , help="batch size of training data for secondary learner" , )
    parser.add_argument(
        "--batch_size" , default=16 , type=int , help="batch size of training data of language model(gpt2) " )
    parser.add_argument(
        "--eval_interval" , default=10 , type=int , help=(
            "decay the selectivity of our secondary learner filter from"
            "1 standard deviation above average to 1 below average after 10 batches"
        ) , )
    parser.add_argument(
        "--number" , default=100 , type=int , help="The number of examples split to be used as objective_set/test_data" )
    parser.add_argument(
        "--min_len" , default=1026 , type=int , help="The minimum length of the article to be used as objective set" )
    parser.add_argument(
        "--secondary_learner_max_epochs" , default=15 , type=int , help="number of epochs to train secondary learner" )
    parser.add_argument("--trim" , default=True , type=bool , help="truncate the example if it exceeds context length" )
    parser.add_argument(
        "--threshold" , default=1.0 , type=float , help=(
            "The threshold value used by secondary learner to filter the train_data and allow only"
            " informative data as input to the model"
        ) , )
    parser.add_argument("--finetuned_model_name" , default="gpt2_finetuned.pt" , type=str , help="finetuned_model_name" )
    parser.add_argument(
        "--recopy_model" , default=recopy_gpta , type=str , help="Reset the model to the original pretrained GPT-2 weights after each iteration" , )
    # function calls
    # Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
    generate_n_pairs(
        context_len=32 , max_steps=10 , size_objective_set=100 , min_len=1026 , trim=True , data_file="data/tokenized_stories_train_wikitext103.jbl" , igf_data_file="igf_context_pairs.jbl" , )
    # Load train data for secondary learner
    secondary_learner_train_data = joblib.load("data/IGF_values.jbl" )
    # Train secondary learner
    secondary_learner = training_secondary_learner(
        secondary_learner_train_data , secondary_learner_max_epochs=15 , secondary_learner_batch_size=128 , eval_freq=100 , igf_model_path="igf_model.pt" , )
    # load pretrained gpt2 model
    model = GPTaLMHeadModel.from_pretrained("gpt2" )
    set_seed(42 )
    # Generate train and test data to train and evaluate gpt2 model
    train_dataset, test_dataset = generate_datasets(
        context_len=32 , file="data/tokenized_stories_train_wikitext103.jbl" , number=100 , min_len=1026 , trim=True )
    # fine-tuning of the gpt2 model using igf (Information Gain Filtration)
    finetune(
        model , train_dataset , test_dataset , context_len=32 , max_steps=1000 , batch_size=16 , threshold=1.0 , recopy_model=recopy_gpta , secondary_learner=secondary_learner , eval_interval=10 , finetuned_model_name="gpt2_finetuned.pt" , )
if __name__ == "__main__":
main()
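# Pipeline recap (illustrative summary of the calls above): (1) sample
# (context, IG(X)) pairs from a frozen GPT-2 to build secondary-learner
# training data, (2) train the secondary learner to predict information gain,
# (3) fine-tune GPT-2 while the learner filters out low-IG batches.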
| 719 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 255 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
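    # Worked example (illustrative): with shortest_edge=18, a 400x30 image
    # (w=400, h=30) keeps its aspect ratio, so expected_height = 18 and
    # expected_width = int(18 * 400 / 30) = 240.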
@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None
    def setUp( self ):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self )
    @property
    def image_processor_dict( self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 18, "longest_edge": 1333} )
        self.assertEqual(image_processor.do_pad , True )
        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 42, "longest_edge": 84} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self ):
pass
    def test_call_pil( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
@slow
    def test_call_pytorch_with_coco_detection_annotations( self ):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 39769, "annotations": target}
        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
@slow
    def test_call_pytorch_with_coco_panoptic_annotations( self ):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([39769] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([800, 1066] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
| 25 | 0 |
import argparse
import torch
from transformers import (
WavaVecaConfig,
WavaVecaFeatureExtractor,
WavaVecaForAudioFrameClassification,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
logging,
)
logging.set_verbosity_info()
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
def convert_classification(base_model_name , hf_config , downstream_dict ):
    model = WavaVecaForSequenceClassification.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["projector.weight"]
    model.projector.bias.data = downstream_dict["projector.bias"]
    model.classifier.weight.data = downstream_dict["model.post_net.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.post_net.linear.bias"]
    return model
def convert_diarization(base_model_name , hf_config , downstream_dict ):
    model = WavaVecaForAudioFrameClassification.from_pretrained(base_model_name , config=hf_config )
    model.classifier.weight.data = downstream_dict["model.linear.weight"]
    model.classifier.bias.data = downstream_dict["model.linear.bias"]
    return model
def convert_xvector(base_model_name , hf_config , downstream_dict ):
    model = WavaVecaForXVector.from_pretrained(base_model_name , config=hf_config )
    model.projector.weight.data = downstream_dict["connector.weight"]
    model.projector.bias.data = downstream_dict["connector.bias"]
    for i, kernel_size in enumerate(hf_config.tdnn_kernel ):
        model.tdnn[i].kernel.weight.data = downstream_dict[
            f'''model.framelevel_feature_extractor.module.{i}.kernel.weight'''
        ]
        model.tdnn[i].kernel.bias.data = downstream_dict[f'''model.framelevel_feature_extractor.module.{i}.kernel.bias''']
    model.feature_extractor.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.weight"]
    model.feature_extractor.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear1.bias"]
    model.classifier.weight.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.weight"]
    model.classifier.bias.data = downstream_dict["model.utterancelevel_feature_extractor.linear2.bias"]
    model.objective.weight.data = downstream_dict["objective.W"]
    return model
@torch.no_grad()
def convert_saprl_checkpoint(base_model_name , config_path , checkpoint_path , model_dump_path ):
    checkpoint = torch.load(checkpoint_path , map_location="cpu" )
    downstream_dict = checkpoint["Downstream"]
    hf_config = WavaVecaConfig.from_pretrained(config_path )
    hf_feature_extractor = WavaVecaFeatureExtractor.from_pretrained(
        base_model_name , return_attention_mask=True , do_normalize=False )
    arch = hf_config.architectures[0]
    if arch.endswith("ForSequenceClassification" ):
        hf_model = convert_classification(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForAudioFrameClassification" ):
        hf_model = convert_diarization(base_model_name , hf_config , downstream_dict )
    elif arch.endswith("ForXVector" ):
        hf_model = convert_xvector(base_model_name , hf_config , downstream_dict )
    else:
        raise NotImplementedError(f'''S3PRL weights conversion is not supported for {arch}''' )
    if hf_config.use_weighted_layer_sum:
        hf_model.layer_weights.data = checkpoint["Featurizer"]["weights"]
    hf_feature_extractor.save_pretrained(model_dump_path )
    hf_model.save_pretrained(model_dump_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--base_model_name""", default=None, type=str, help="""Name of the huggingface pretrained base model."""
)
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to the huggingface classifier config.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to the s3prl checkpoint.""")
parser.add_argument("""--model_dump_path""", default=None, type=str, help="""Path to the final converted model.""")
    args = parser.parse_args()
convert_saprl_checkpoint(args.base_model_name, args.config_path, args.checkpoint_path, args.model_dump_path)
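# Example invocation (hypothetical paths; the base model id is illustrative):
#   python convert_s3prl_checkpoint.py --base_model_name facebook/wav2vec2-base \
#       --config_path ./config.json --checkpoint_path ./s3prl_checkpoint.pt \
#       --model_dump_path ./converted_model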
| 720 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCamelCase : str = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric ):
    """simple docstring"""
    def _info( self ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
    def _compute( self , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=500 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1024 , divergence_curve_discretization_size=25 , mauve_scaling_factor=5 , verbose=True , seed=25 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 25 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPImageProcessor, CLIPProcessor
@require_vision
class CLIPProcessorTest(unittest.TestCase ):
"""simple docstring"""
    def setUp( self ):
        self.tmpdirname = tempfile.mkdtemp()
        # fmt: off
        vocab = ["l", "o", "w", "e", "r", "s", "t", "i", "d", "n", "lo", "l</w>", "w</w>", "r</w>", "t</w>", "low</w>", "er</w>", "lowest</w>", "newer</w>", "wider", "<unk>", "<|startoftext|>", "<|endoftext|>"]
        # fmt: on
        vocab_tokens = dict(zip(vocab , range(len(vocab ) ) ) )
        merges = ["#version: 0.2", "l o", "lo w</w>", "e r</w>", ""]
        self.special_tokens_map = {"unk_token": "<unk>"}
        self.vocab_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
        self.merges_file = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
        with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
            fp.write(json.dumps(vocab_tokens ) + "\n" )
        with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
            fp.write("\n".join(merges ) )
        image_processor_map = {
            "do_resize": True,
            "size": 20,
            "do_center_crop": True,
            "crop_size": 18,
            "do_normalize": True,
            "image_mean": [0.48145466, 0.4578275, 0.40821073],
            "image_std": [0.26862954, 0.26130258, 0.27577711],
        }
        self.image_processor_file = os.path.join(self.tmpdirname , IMAGE_PROCESSOR_NAME )
        with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
            json.dump(image_processor_map , fp )
    def get_tokenizer( self , **kwargs ):
        return CLIPTokenizer.from_pretrained(self.tmpdirname , **kwargs )
    def get_rust_tokenizer( self , **kwargs ):
        return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **kwargs )
    def get_image_processor( self , **kwargs ):
        return CLIPImageProcessor.from_pretrained(self.tmpdirname , **kwargs )
    def tearDown( self ):
        shutil.rmtree(self.tmpdirname )
    def prepare_image_inputs( self ):
        image_inputs = [np.random.randint(255 , size=(3, 30, 400) , dtype=np.uint8 )]
        image_inputs = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
        return image_inputs
    def test_save_load_pretrained_default( self ):
        tokenizer_slow = self.get_tokenizer()
        tokenizer_fast = self.get_rust_tokenizer()
        image_processor = self.get_image_processor()
        processor_slow = CLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
        processor_slow.save_pretrained(self.tmpdirname )
        processor_slow = CLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        processor_fast = CLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
        processor_fast.save_pretrained(self.tmpdirname )
        processor_fast = CLIPProcessor.from_pretrained(self.tmpdirname )
        self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
        self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , CLIPTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , CLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , CLIPImageProcessor )
    def test_save_load_pretrained_additional_features( self ):
        processor = CLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
        processor.save_pretrained(self.tmpdirname )
        tokenizer_add_kwargs = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
        image_processor_add_kwargs = self.get_image_processor(do_normalize=False , padding_value=1.0 )
        processor = CLIPProcessor.from_pretrained(
            self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=False , padding_value=1.0 )
        self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , CLIPTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , CLIPImageProcessor )
    def test_image_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        image_input = self.prepare_image_inputs()
        input_image_proc = image_processor(image_input , return_tensors="np" )
        input_processor = processor(images=image_input , return_tensors="np" )
        for key in input_image_proc.keys():
            self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
    def test_tokenizer( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        encoded_processor = processor(text=input_str )
        encoded_tok = tokenizer(input_str )
        for key in encoded_tok.keys():
            self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
    def test_processor( self ):
        image_processor = self.get_image_processor()
        tokenizer = self.get_tokenizer()
        processor = CLIPProcessor(tokenizer=tokenizer , image_processor=image_processor )
        input_str = "lower newer"
        image_input = self.prepare_image_inputs()
        inputs = processor(text=input_str , images=image_input )
        self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
        # test if it raises when no input is passed
        with pytest.raises(ValueError ):
            processor()
def _lowercase ( self : List[str] ):
snake_case__ : Tuple = self.get_image_processor()
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : Optional[int] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case__ : Tuple = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : str = processor.batch_decode(_lowercase )
snake_case__ : Any = tokenizer.batch_decode(_lowercase )
self.assertListEqual(_lowercase , _lowercase )
def _lowercase ( self : str ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : int = self.get_tokenizer()
snake_case__ : List[Any] = CLIPProcessor(tokenizer=_lowercase , image_processor=_lowercase )
snake_case__ : Union[str, Any] = """lower newer"""
snake_case__ : Optional[Any] = self.prepare_image_inputs()
snake_case__ : Optional[Any] = processor(text=_lowercase , images=_lowercase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
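# Hedged usage sketch of the behaviour the tests above assert: a CLIPProcessor
# call with both text and images yields exactly input_ids, attention_mask and
# pixel_values. The checkpoint id is an arbitrary public example chosen for
# illustration, from_pretrained needs network access, and the helper name is
# hypothetical.
def _example_clip_processor_usage():
    from PIL import Image

    from transformers import CLIPProcessor

    processor = CLIPProcessor.from_pretrained("openai/clip-vit-base-patch32")
    inputs = processor(text=["lower newer"], images=Image.new("RGB", (224, 224)), return_tensors="pt")
    assert sorted(inputs.keys()) == ["attention_mask", "input_ids", "pixel_values"]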
| 721 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
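# Hedged usage sketch of the API surface re-exported above; the dataset id is
# an arbitrary small public example, load_dataset needs network access, and the
# helper name is hypothetical.
def _example_load_dataset():
    ds = load_dataset("rotten_tomatoes", split="train")
    print(len(ds), ds.features)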
| 25 | 0 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : Tuple = SwinvaConfig()
snake_case__ : Dict = swinva_name.split("_" )
snake_case__ : Dict = name_split[1]
if "to" in name_split[3]:
snake_case__ : Optional[int] = int(name_split[3][-3:] )
else:
snake_case__ : Optional[Any] = int(name_split[3] )
if "to" in name_split[2]:
snake_case__ : Optional[int] = int(name_split[2][-2:] )
else:
snake_case__ : Optional[int] = int(name_split[2][6:] )
if model_size == "tiny":
snake_case__ : Dict = 96
snake_case__ : List[str] = (2, 2, 6, 2)
snake_case__ : List[Any] = (3, 6, 12, 24)
elif model_size == "small":
snake_case__ : str = 96
snake_case__ : Union[str, Any] = (2, 2, 18, 2)
snake_case__ : Optional[Any] = (3, 6, 12, 24)
elif model_size == "base":
snake_case__ : List[str] = 128
snake_case__ : Union[str, Any] = (2, 2, 18, 2)
snake_case__ : Optional[int] = (4, 8, 16, 32)
else:
snake_case__ : str = 192
snake_case__ : str = (2, 2, 18, 2)
snake_case__ : List[Any] = (6, 12, 24, 48)
if "to" in swinva_name:
snake_case__ : Optional[Any] = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
snake_case__ : int = 21841
snake_case__ : int = "huggingface/label-files"
snake_case__ : Union[str, Any] = "imagenet-22k-id2label.json"
snake_case__ : int = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) )
snake_case__ : Union[str, Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
snake_case__ : Union[str, Any] = idalabel
snake_case__ : Union[str, Any] = {v: k for k, v in idalabel.items()}
else:
snake_case__ : List[Any] = 1000
snake_case__ : List[str] = "huggingface/label-files"
snake_case__ : List[Any] = "imagenet-1k-id2label.json"
snake_case__ : str = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type="dataset" ) , "r" ) )
snake_case__ : List[Any] = {int(snake_case__ ): v for k, v in idalabel.items()}
snake_case__ : Dict = idalabel
snake_case__ : int = {v: k for k, v in idalabel.items()}
snake_case__ : List[str] = img_size
snake_case__ : Any = num_classes
snake_case__ : Dict = embed_dim
snake_case__ : Union[str, Any] = depths
snake_case__ : Optional[Any] = num_heads
snake_case__ : Any = window_size
return config
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
if "patch_embed.proj" in name:
snake_case__ : Optional[int] = name.replace("patch_embed.proj" , "embeddings.patch_embeddings.projection" )
if "patch_embed.norm" in name:
snake_case__ : List[str] = name.replace("patch_embed.norm" , "embeddings.norm" )
if "layers" in name:
snake_case__ : Optional[int] = "encoder." + name
if "attn.proj" in name:
snake_case__ : Optional[int] = name.replace("attn.proj" , "attention.output.dense" )
if "attn" in name:
snake_case__ : Union[str, Any] = name.replace("attn" , "attention.self" )
if "norm1" in name:
snake_case__ : str = name.replace("norm1" , "layernorm_before" )
if "norm2" in name:
snake_case__ : Optional[int] = name.replace("norm2" , "layernorm_after" )
if "mlp.fc1" in name:
snake_case__ : Optional[int] = name.replace("mlp.fc1" , "intermediate.dense" )
if "mlp.fc2" in name:
snake_case__ : int = name.replace("mlp.fc2" , "output.dense" )
if "q_bias" in name:
snake_case__ : Dict = name.replace("q_bias" , "query.bias" )
if "k_bias" in name:
snake_case__ : int = name.replace("k_bias" , "key.bias" )
if "v_bias" in name:
snake_case__ : List[str] = name.replace("v_bias" , "value.bias" )
if "cpb_mlp" in name:
snake_case__ : int = name.replace("cpb_mlp" , "continuous_position_bias_mlp" )
if name == "norm.weight":
snake_case__ : str = "layernorm.weight"
if name == "norm.bias":
snake_case__ : str = "layernorm.bias"
if "head" in name:
snake_case__ : List[str] = name.replace("head" , "classifier" )
else:
snake_case__ : List[str] = "swinv2." + name
return name
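# Worked example of the renaming rules above, traced by hand because the
# function name is mangled in this dump (the helper name is hypothetical): a
# timm key gains the "encoder." prefix, "attn.proj" becomes
# "attention.output.dense", and the result is namespaced under "swinv2.".
def _example_rename_trace():
    name = "layers.0.blocks.0.attn.proj.weight"
    name = "encoder." + name
    name = name.replace("attn.proj", "attention.output.dense")
    name = "swinv2." + name
    assert name == "swinv2.encoder.layers.0.blocks.0.attention.output.dense.weight"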
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : List[Any] ):
for key in orig_state_dict.copy().keys():
snake_case__ : Optional[Any] = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
snake_case__ : Optional[int] = key.split("." )
snake_case__ : Any = int(key_split[1] )
snake_case__ : Optional[Any] = int(key_split[3] )
snake_case__ : Optional[int] = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
snake_case__ : int = val[:dim, :]
snake_case__ : Dict = val[dim : dim * 2, :]
snake_case__ : Optional[int] = val[-dim:, :]
else:
snake_case__ : Union[str, Any] = val[:dim]
snake_case__ : Optional[Any] = val[
dim : dim * 2
]
snake_case__ : Any = val[-dim:]
else:
snake_case__ : Dict = val
return orig_state_dict
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Tuple ):
snake_case__ : Any = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
snake_case__ : int = get_swinva_config(snake_case__ )
snake_case__ : List[Any] = SwinvaForImageClassification(snake_case__ )
model.eval()
snake_case__ : Tuple = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
snake_case__ : Union[str, Any] = "http://images.cocodataset.org/val2017/000000039769.jpg"
snake_case__ : List[Any] = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinva_name.replace("_" , "-" ) ) )
snake_case__ : Union[str, Any] = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
snake_case__ : List[Any] = image_processor(images=snake_case__ , return_tensors="pt" )
snake_case__ : List[str] = timm_model(inputs["pixel_values"] )
snake_case__ : Tuple = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1E-3 )
print(F'''Saving model {swinva_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case__ )
print(F'''Saving image processor to {pytorch_dump_folder_path}''' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization="nandwalritik" , commit_message="Add model" , )
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swinv2_name""",
default="""swinv2_tiny_patch4_window8_256""",
type=str,
help="""Name of the Swinv2 timm model you\'d like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
args = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
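# Example invocation (the script file name is assumed from context and the
# output path is a placeholder):
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-patch4-window8-256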
| 700 |
from __future__ import annotations


def prime_sieve(limit: int) -> list[int]:
    """Sieve of Eratosthenes, iterating only over the odd numbers below `limit`."""
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1000000) -> int:
    """Project Euler 50: the prime below `ceiling` that is the sum of the
    longest run of consecutive primes."""
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest


if __name__ == "__main__":
    print(f"{solution() = }")
| 25 | 0 |
import unittest
from transformers import DonutProcessor
__lowerCamelCase : Any = """naver-clova-ix/donut-base"""
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
self.processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = {
"name": "John Doe",
"age": "99",
"city": "Atlanta",
"state": "GA",
"zip": "30301",
"phone": "123-4567",
"nicknames": [{"nickname": "Johnny"}, {"nickname": "JD"}],
}
snake_case__ : Any = (
"<s_name>John Doe</s_name><s_age>99</s_age><s_city>Atlanta</s_city>"
"<s_state>GA</s_state><s_zip>30301</s_zip><s_phone>123-4567</s_phone>"
"<s_nicknames><s_nickname>Johnny</s_nickname>"
"<sep/><s_nickname>JD</s_nickname></s_nicknames>"
)
snake_case__ : Any = self.processor.tokenajson(__A )
self.assertDictEqual(__A , __A )
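# Hedged sketch of the parsing step exercised above (the real method is
# DonutProcessor.token2json, mangled to "tokenajson" in this dump);
# from_pretrained needs network access and the helper name is hypothetical.
def _example_token2json():
    processor = DonutProcessor.from_pretrained(DONUT_PRETRAINED_MODEL_NAME)
    sequence = "<s_name>John Doe</s_name><s_age>99</s_age>"
    assert processor.token2json(sequence) == {"name": "John Doe", "age": "99"}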
| 701 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : int = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
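# Worked example of the shortest-edge arithmetic in get_expected_values above,
# with the default size {"shortest_edge": 18}: for a 40x20 (height x width)
# image, w < h pins the width to 18 and scales the height proportionally
# (the helper name is hypothetical).
def _example_expected_size():
    shortest_edge, h, w = 18, 40, 20
    expected_height = int(shortest_edge * h / w)  # 36
    expected_width = shortest_edge  # 18
    assert (expected_height, expected_width) == (36, 18)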
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 0 |
import os
from glob import glob
import imageio
import torch
import torchvision
import wandb
from img_processing import custom_to_pil, loop_post_process, preprocess, preprocess_vqgan
from loaders import load_vqgan
from PIL import Image
from torch import nn
from transformers import CLIPModel, CLIPTokenizerFast
from utils import get_device, get_timestamp, show_pil
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Optional[int] , __A : Any = "cpu" , __A : List[str] = "openai/clip-vit-large-patch14" ):
snake_case__ : List[str] = device
snake_case__ : List[str] = CLIPTokenizerFast.from_pretrained(__A )
snake_case__ : Optional[Any] = [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3]
snake_case__ : Any = [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1]
snake_case__ : Dict = torchvision.transforms.Normalize(self.image_mean , self.image_std )
snake_case__ : Dict = torchvision.transforms.Resize(2_2_4 )
snake_case__ : Tuple = torchvision.transforms.CenterCrop(2_2_4 )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : List[str] = self.resize(__A )
snake_case__ : int = self.center_crop(__A )
snake_case__ : Optional[int] = self.normalize(__A )
return images
def __call__( self : str , __A : Union[str, Any]=None , __A : List[Any]=None , **__A : Dict ):
snake_case__ : str = self.tokenizer(text=__A , **__A )
snake_case__ : Tuple = self.preprocess_img(__A )
snake_case__ : Union[str, Any] = {key: value.to(self.device ) for (key, value) in encoding.items()}
return encoding
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : List[str]=1_0 , __A : List[str]=0.0_1 , __A : int=None , __A : Tuple=None , __A : Tuple=None , __A : Union[str, Any]=None , __A : str=None , __A : Dict=None , __A : List[Any]=False , __A : Tuple=True , __A : int="image" , __A : List[Any]=True , __A : Tuple=False , __A : Dict=False , __A : List[Any]=False , ):
super().__init__()
snake_case__ : Union[str, Any] = None
snake_case__ : Union[str, Any] = device if device else get_device()
if vqgan:
snake_case__ : Any = vqgan
else:
snake_case__ : List[str] = load_vqgan(self.device , conf_path=__A , ckpt_path=__A )
self.vqgan.eval()
if clip:
snake_case__ : Optional[Any] = clip
else:
snake_case__ : int = CLIPModel.from_pretrained("openai/clip-vit-base-patch32" )
self.clip.to(self.device )
snake_case__ : List[Any] = ProcessorGradientFlow(device=self.device )
snake_case__ : str = iterations
snake_case__ : Any = lr
snake_case__ : int = log
snake_case__ : List[str] = make_grid
snake_case__ : List[Any] = return_val
snake_case__ : List[Any] = quantize
snake_case__ : Union[str, Any] = self.vqgan.decoder.z_shape
def _lowercase ( self : Tuple , __A : str=None , __A : Dict=None , __A : str=5 , __A : Dict=True ):
snake_case__ : List[str] = []
if output_path is None:
snake_case__ : List[Any] = "./animation.gif"
if input_path is None:
snake_case__ : List[str] = self.save_path
snake_case__ : List[Any] = sorted(glob(input_path + "/*" ) )
if not len(__A ):
raise ValueError(
"No images found in save path, aborting (did you pass save_intermediate=True to the generate"
" function?)" )
if len(__A ) == 1:
print("Only one image found in save path, (did you pass save_intermediate=True to the generate function?)" )
snake_case__ : Dict = total_duration / len(__A )
snake_case__ : Optional[int] = [frame_duration] * len(__A )
if extend_frames:
snake_case__ : int = 1.5
snake_case__ : List[Any] = 3
for file_name in paths:
if file_name.endswith(".png" ):
images.append(imageio.imread(__A ) )
imageio.mimsave(__A , __A , duration=__A )
print(f'''gif saved to {output_path}''' )
def _lowercase ( self : str , __A : List[str]=None , __A : str=None ):
if not (path or img):
raise ValueError("Input either path or tensor" )
if img is not None:
raise NotImplementedError
snake_case__ : List[str] = preprocess(Image.open(__A ) , target_image_size=2_5_6 ).to(self.device )
snake_case__ : Optional[Any] = preprocess_vqgan(__A )
snake_case__, *snake_case__ : Optional[Any] = self.vqgan.encode(__A )
return z
def _lowercase ( self : Optional[Any] , __A : int ):
snake_case__ : Union[str, Any] = self.latent.detach().requires_grad_()
snake_case__ : Optional[int] = base_latent + transform_vector
if self.quantize:
snake_case__, *snake_case__ : Optional[int] = self.vqgan.quantize(__A )
else:
snake_case__ : Tuple = trans_latent
return self.vqgan.decode(__A )
def _lowercase ( self : List[Any] , __A : List[str] , __A : List[str] , __A : Union[str, Any]=None ):
snake_case__ : List[str] = self.clip_preprocessor(text=__A , images=__A , return_tensors="pt" , padding=__A )
snake_case__ : str = self.clip(**__A )
snake_case__ : Any = clip_outputs.logits_per_image
if weights is not None:
snake_case__ : List[Any] = similarity_logits * weights
return similarity_logits.sum()
def _lowercase ( self : Union[str, Any] , __A : Optional[Any] , __A : Optional[int] , __A : Any ):
snake_case__ : str = self._get_clip_similarity(pos_prompts["prompts"] , __A , weights=(1 / pos_prompts["weights"]) )
if neg_prompts:
snake_case__ : Optional[Any] = self._get_clip_similarity(neg_prompts["prompts"] , __A , weights=neg_prompts["weights"] )
else:
snake_case__ : Optional[Any] = torch.tensor([1] , device=self.device )
snake_case__ : Dict = -torch.log(__A ) + torch.log(__A )
return loss
def _lowercase ( self : List[Any] , __A : Optional[Any] , __A : Union[str, Any] , __A : List[Any] ):
snake_case__ : Dict = torch.randn_like(self.latent , requires_grad=__A , device=self.device )
snake_case__ : Dict = torch.optim.Adam([vector] , lr=self.lr )
for i in range(self.iterations ):
optim.zero_grad()
snake_case__ : Union[str, Any] = self._add_vector(__A )
snake_case__ : Tuple = loop_post_process(__A )
snake_case__ : List[Any] = self._get_CLIP_loss(__A , __A , __A )
print("CLIP loss" , __A )
if self.log:
wandb.log({"CLIP Loss": clip_loss} )
clip_loss.backward(retain_graph=__A )
optim.step()
if self.return_val == "image":
yield custom_to_pil(transformed_img[0] )
else:
yield vector
def _lowercase ( self : Tuple , __A : Union[str, Any] , __A : Tuple , __A : Dict ):
wandb.init(reinit=__A , project="face-editor" )
wandb.config.update({"Positive Prompts": positive_prompts} )
wandb.config.update({"Negative Prompts": negative_prompts} )
wandb.config.update({"lr": self.lr, "iterations": self.iterations} )
if image_path:
snake_case__ : Dict = Image.open(__A )
snake_case__ : List[Any] = image.resize((2_5_6, 2_5_6) )
wandb.log("Original Image" , wandb.Image(__A ) )
def _lowercase ( self : int , __A : List[str] ):
if not prompts:
return []
snake_case__ : Optional[Any] = []
snake_case__ : List[str] = []
if isinstance(__A , __A ):
snake_case__ : List[Any] = [prompt.strip() for prompt in prompts.split("|" )]
for prompt in prompts:
if isinstance(__A , (tuple, list) ):
snake_case__ : List[str] = prompt[0]
snake_case__ : Optional[Any] = float(prompt[1] )
elif ":" in prompt:
snake_case__, snake_case__ : Dict = prompt.split(":" )
snake_case__ : Any = float(__A )
else:
snake_case__ : List[Any] = prompt
snake_case__ : Optional[int] = 1.0
processed_prompts.append(__A )
weights.append(__A )
return {
"prompts": processed_prompts,
"weights": torch.tensor(__A , device=self.device ),
}
def _lowercase ( self : Tuple , __A : int , __A : Tuple=None , __A : Any=None , __A : Optional[Any]=True , __A : List[str]=False , __A : Optional[int]=True , __A : str=True , __A : Union[str, Any]=None , ):
if image_path:
snake_case__ : str = self._get_latent(__A )
else:
snake_case__ : List[str] = torch.randn(self.latent_dim , device=self.device )
if self.log:
self._init_logging(__A , __A , __A )
assert pos_prompts, "You must provide at least one positive prompt."
snake_case__ : List[str] = self.process_prompts(__A )
snake_case__ : List[Any] = self.process_prompts(__A )
if save_final and save_path is None:
snake_case__ : Any = os.path.join("./outputs/" , "_".join(pos_prompts["prompts"] ) )
if not os.path.exists(__A ):
os.makedirs(__A )
else:
snake_case__ : Dict = save_path + "_" + get_timestamp()
os.makedirs(__A )
snake_case__ : Union[str, Any] = save_path
snake_case__ : Optional[Any] = self.vqgan.decode(self.latent )[0]
if show_intermediate:
print("Original Image" )
show_pil(custom_to_pil(__A ) )
snake_case__ : Dict = loop_post_process(__A )
for iter, transformed_img in enumerate(self._optimize_CLIP(__A , __A , __A ) ):
if show_intermediate:
show_pil(__A )
if save_intermediate:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}.png''' ) )
if self.log:
wandb.log({"Image": wandb.Image(__A )} )
if show_final:
show_pil(__A )
if save_final:
transformed_img.save(os.path.join(self.save_path , f'''iter_{iter:03d}_final.png''' ) )
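# Worked example of the prompt syntax handled by process_prompts above: "|"
# separates prompts and ":" attaches an optional weight (default 1.0). This is
# a standalone re-implementation of that parsing for illustration only; the
# helper name is hypothetical.
def _example_parse_prompts():
    parsed = []
    for prompt in [p.strip() for p in "a smiling face:2|wearing glasses".split("|")]:
        if ":" in prompt:
            text, weight = prompt.split(":")
            parsed.append((text, float(weight)))
        else:
            parsed.append((prompt, 1.0))
    assert parsed == [("a smiling face", 2.0), ("wearing glasses", 1.0)]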
| 702 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
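# Worked example of the table built above: printable bytes map to themselves,
# while excluded bytes such as the space (0x20) are shifted into the printable
# range, here to chr(0x120), i.e. "Ġ" (the helper name is hypothetical).
def _example_bytes_to_unicode():
    table = bytes_to_unicode()
    assert table[ord("A")] == "A"
    assert table[ord(" ")] == chr(2**8 + ord(" "))  # "Ġ"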
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ):
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Any = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Union[str, Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Optional[Any] , __A : Optional[int] ):
if token in self.cache:
return self.cache[token]
snake_case__ : Union[str, Any] = tuple(__A )
snake_case__ : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__, snake_case__ : Dict = bigram
snake_case__ : str = []
snake_case__ : Union[str, Any] = 0
while i < len(__A ):
try:
snake_case__ : Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : str = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__A )
snake_case__ : List[Any] = " ".join(__A )
snake_case__ : Optional[int] = word
return word
def _lowercase ( self : Optional[Any] , __A : Optional[Any] ):
snake_case__ : List[str] = []
for token in re.findall(self.pat , __A ):
snake_case__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : Optional[Any] ):
return self.decoder.get(__A )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : Optional[Any] = "".join(__A )
snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : str = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : str = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ):
snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : Optional[int] = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
snake_case__ : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
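# Sketch of the global_attention_mask padding rule implemented above: padded
# positions receive -1 ("local attention") rather than 0, so right-padding
# [0, 0, 1] to length 5 gives [0, 0, 1, -1, -1] (the helper name is
# hypothetical).
def _example_global_attention_padding():
    mask, target_length = [0, 0, 1], 5
    padded = mask + [-1] * (target_length - len(mask))
    assert padded == [0, 0, 1, -1, -1]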
| 25 | 0 |
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError("Algorithm is unable to find solution")
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i

    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1
    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)

    print("ACTION MAP")
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
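# Hedged sketch translating the printed action map into arrows along the found
# path; the symbol list mirrors the order of DIRECTIONS above and both the
# symbols and the helper name are chosen here purely for illustration.
def _example_action_symbols(action, path):
    symbols = ["<", "v", ">", "^"]  # one symbol per entry in DIRECTIONS
    return [symbols[action[x][y]] for x, y in path[1:]]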
| 703 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
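# With these hooks registered, the shared reporting machinery can be enabled
# from the command line, e.g. (invocation shown for illustration; the report id
# is a placeholder):
#
#   pytest tests/ --make-reports=my_test_run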
| 25 | 0 |
import fire
from utils import calculate_rouge, save_json
def calculate_rouge_path(pred_path, tgt_path, save_path=None, **kwargs):
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely
if __name__ == "__main__":
fire.Fire(calculate_rouge_path)
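# Fire exposes the function's signature on the command line, so a typical call
# looks like (the script and file names are placeholders):
#
#   python rouge_cli.py predictions.txt references.txt --save_path rouge.json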
| 704 |
def prefix_function(input_string: str) -> list[int]:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_str: str) -> int:
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
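# Worked example (the helper name is hypothetical): for "aabaaab" the prefix
# function is [0, 1, 0, 1, 2, 2, 3], so the longest border length is 3.
def _example_prefix_function():
    assert prefix_function("aabaaab") == [0, 1, 0, 1, 2, 2, 3]
    assert longest_prefix("aabaaab") == 3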
| 25 | 0 |
import itertools
import json
import os
import unittest
from transformers import AddedToken, LongformerTokenizer, LongformerTokenizerFast
from transformers.models.longformer.tokenization_longformer import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __a , unittest.TestCase ):
"""simple docstring"""
a_ = LongformerTokenizer
a_ = True
a_ = LongformerTokenizerFast
a_ = True
def _lowercase ( self : Any ):
super().setUp()
# Adapted from Sennrich et al. 2015 and https://github.com/rsennrich/subword-nmt
snake_case__ : List[Any] = [
"""l""",
"""o""",
"""w""",
"""e""",
"""r""",
"""s""",
"""t""",
"""i""",
"""d""",
"""n""",
"""\u0120""",
"""\u0120l""",
"""\u0120n""",
"""\u0120lo""",
"""\u0120low""",
"""er""",
"""\u0120lowest""",
"""\u0120newer""",
"""\u0120wider""",
"""<unk>""",
]
snake_case__ : List[str] = dict(zip(a_ , range(len(a_ ) ) ) )
snake_case__ : List[Any] = ["""#version: 0.2""", """\u0120 l""", """\u0120l o""", """\u0120lo w""", """e r""", """"""]
snake_case__ : Any = {"""unk_token""": """<unk>"""}
snake_case__ : int = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : Tuple = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(a_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(a_ ) )
def _lowercase ( self : Union[str, Any] , **__A : List[Any] ):
kwargs.update(self.special_tokens_map )
return self.tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
def _lowercase ( self : str , **__A : str ):
kwargs.update(self.special_tokens_map )
return self.rust_tokenizer_class.from_pretrained(self.tmpdirname , **a_ )
def _lowercase ( self : Tuple , __A : Tuple ):
snake_case__ : int = """lower newer"""
snake_case__ : Optional[int] = """lower newer"""
return input_text, output_text
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = self.tokenizer_class(self.vocab_file , self.merges_file , **self.special_tokens_map )
snake_case__ : int = """lower newer"""
snake_case__ : Union[str, Any] = ["""l""", """o""", """w""", """er""", """\u0120""", """n""", """e""", """w""", """er"""]
snake_case__ : int = tokenizer.tokenize(a_ ) # , add_prefix_space=True)
self.assertListEqual(a_ , a_ )
snake_case__ : Any = tokens + [tokenizer.unk_token]
snake_case__ : str = [0, 1, 2, 1_5, 1_0, 9, 3, 2, 1_5, 1_9]
self.assertListEqual(tokenizer.convert_tokens_to_ids(a_ ) , a_ )
def _lowercase ( self : List[Any] ):
snake_case__ : int = self.get_tokenizer()
self.assertListEqual(tokenizer.encode("Hello world!" , add_special_tokens=a_ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 2] )
self.assertListEqual(
tokenizer.encode("Hello world! cรฉcรฉ herlolip 418" , add_special_tokens=a_ ) , [0, 3_1_4_1_4, 2_3_2, 3_2_8, 7_4_0, 1_1_4_0, 1_2_6_9_5, 6_9, 4_6_0_7_8, 1_5_8_8, 2] , )
@slow
def _lowercase ( self : Optional[int] ):
snake_case__ : Optional[int] = self.tokenizer_class.from_pretrained("allenai/longformer-base-4096" )
snake_case__ : Union[str, Any] = tokenizer.encode("sequence builders" , add_special_tokens=a_ )
snake_case__ : Tuple = tokenizer.encode("multi-sequence build" , add_special_tokens=a_ )
snake_case__ : Tuple = tokenizer.encode(
"sequence builders" , add_special_tokens=a_ , add_prefix_space=a_ )
snake_case__ : Optional[Any] = tokenizer.encode(
"sequence builders" , "multi-sequence build" , add_special_tokens=a_ , add_prefix_space=a_ )
snake_case__ : List[str] = tokenizer.build_inputs_with_special_tokens(a_ )
snake_case__ : Union[str, Any] = tokenizer.build_inputs_with_special_tokens(a_ , a_ )
assert encoded_sentence == encoded_text_from_decode
assert encoded_pair == encoded_pair_from_decode
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : Dict = """Encode this sequence."""
snake_case__ : Optional[Any] = tokenizer.byte_encoder[""" """.encode("utf-8" )[0]]
# Testing encoder arguments
snake_case__ : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertNotEqual(a_ , a_ )
snake_case__ : Optional[int] = tokenizer.encode(a_ , add_special_tokens=a_ , add_prefix_space=a_ )
snake_case__ : Optional[int] = tokenizer.convert_ids_to_tokens(encoded[0] )[0]
self.assertEqual(a_ , a_ )
tokenizer.add_special_tokens({"bos_token": "<s>"} )
snake_case__ : Union[str, Any] = tokenizer.encode(a_ , add_special_tokens=a_ )
snake_case__ : str = tokenizer.convert_ids_to_tokens(encoded[1] )[0]
self.assertNotEqual(a_ , a_ )
# Testing spaces after special tokens
snake_case__ : List[str] = """<mask>"""
tokenizer.add_special_tokens(
{"mask_token": AddedToken(a_ , lstrip=a_ , rstrip=a_ )} ) # mask token has a left space
snake_case__ : str = tokenizer.convert_tokens_to_ids(a_ )
snake_case__ : int = """Encode <mask> sequence"""
snake_case__ : Tuple = """Encode <mask>sequence"""
snake_case__ : List[str] = tokenizer.encode(a_ )
snake_case__ : Optional[int] = encoded.index(a_ )
snake_case__ : Tuple = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertEqual(a_ , a_ )
snake_case__ : Union[str, Any] = tokenizer.encode(a_ )
snake_case__ : Optional[int] = encoded.index(a_ )
snake_case__ : Any = tokenizer.convert_ids_to_tokens(encoded[mask_loc + 1] )[0]
self.assertNotEqual(a_ , a_ )
def _lowercase ( self : Tuple ):
pass
def _lowercase ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case__ : str = self.rust_tokenizer_class.from_pretrained(a_ , **a_ )
snake_case__ : Dict = self.tokenizer_class.from_pretrained(a_ , **a_ )
snake_case__ : List[str] = """A, <mask> AllenNLP sentence."""
snake_case__ : Tuple = tokenizer_r.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
snake_case__ : Optional[int] = tokenizer_p.encode_plus(a_ , add_special_tokens=a_ , return_token_type_ids=a_ )
# token_type_ids should put 0 everywhere
self.assertEqual(sum(tokens_r["token_type_ids"] ) , sum(tokens_p["token_type_ids"] ) )
# attention_mask should put 1 everywhere, so sum over length should be 1
self.assertEqual(
sum(tokens_r["attention_mask"] ) / len(tokens_r["attention_mask"] ) , sum(tokens_p["attention_mask"] ) / len(tokens_p["attention_mask"] ) , )
snake_case__ : int = tokenizer_r.convert_ids_to_tokens(tokens_r["input_ids"] )
snake_case__ : int = tokenizer_p.convert_ids_to_tokens(tokens_p["input_ids"] )
# Rust correctly handles the space before the mask while python doesnt
self.assertSequenceEqual(tokens_p["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(tokens_r["input_ids"] , [0, 2_5_0, 6, 5_0_2_6_4, 3_8_2_3, 4_8_7, 2_1_9_9_2, 3_6_4_5, 4, 2] )
self.assertSequenceEqual(
    a_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
self.assertSequenceEqual(
    a_ , ["<s>", "A", ",", "<mask>", "ĠAllen", "N", "LP", "Ġsentence", ".", "</s>"] )
def _lowercase ( self : List[str] ):
for trim_offsets, add_prefix_space in itertools.product([True, False] , repeat=2 ):
snake_case__ : Any = self.rust_tokenizer_class.from_pretrained(
self.tmpdirname , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Optional[Any] = json.loads(tokenizer_r.backend_tokenizer.pre_tokenizer.__getstate__() )
snake_case__ : Any = json.loads(tokenizer_r.backend_tokenizer.post_processor.__getstate__() )
self.assertEqual(pre_tokenizer_state["add_prefix_space"] , a_ )
self.assertEqual(post_processor_state["add_prefix_space"] , a_ )
self.assertEqual(post_processor_state["trim_offsets"] , a_ )
def _lowercase ( self : List[str] ):
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f'''{tokenizer.__class__.__name__} ({pretrained_name})''' ):
snake_case__ : Optional[int] = """hello""" # `hello` is a token in the vocabulary of `pretrained_name`
snake_case__ : int = f'''{text_of_1_token} {text_of_1_token}'''
snake_case__ : Optional[int] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : str = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ) + 1, len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Union[str, Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Dict = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Union[str, Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (len(a_ ), len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Tuple = f''' {text}'''
# tokenizer_r = self.rust_tokenizer_class.from_pretrained(
# pretrained_name, use_fast=True, add_prefix_space=True, trim_offsets=True
# )
# encoding = tokenizer_r(text, return_offsets_mapping=True, add_special_tokens=False)
# self.assertEqual(encoding.offset_mapping[0], (1, 1 + len(text_of_1_token)))
# self.assertEqual(
# encoding.offset_mapping[1],
# (1 + len(text_of_1_token) + 1, 1 + len(text_of_1_token) + 1 + len(text_of_1_token)),
# )
snake_case__ : Optional[Any] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Union[str, Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (1, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ) + 1, 1 + len(a_ ) + 1 + len(a_ )) , )
snake_case__ : Tuple = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Any = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
snake_case__ : List[str] = self.rust_tokenizer_class.from_pretrained(
a_ , use_fast=a_ , add_prefix_space=a_ , trim_offsets=a_ )
snake_case__ : Optional[Any] = tokenizer_r(a_ , return_offsets_mapping=a_ , add_special_tokens=a_ )
self.assertEqual(encoding.offset_mapping[0] , (0, 1 + len(a_ )) )
self.assertEqual(
encoding.offset_mapping[1] , (1 + len(a_ ), 1 + len(a_ ) + 1 + len(a_ )) , )
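    # Hedged summary (not part of the original test, added for readability): the four
    # configurations above differ only in how offsets treat the space before a token.
    # With text = "hello hello" (no leading space) the first offset is always (0, 5);
    # the second is (6, 11) when trim_offsets=True (the space is excluded from the span)
    # and (5, 11) when trim_offsets=False (the space is included).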
| 705 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
        if isinstance(__A , Device ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or
        # `dill`, so we keep the device mapping in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
        if isinstance(__A , list ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def _lowercase ( self : int , __A : Tuple ):
import jax
import jax.numpy as jnp
        if isinstance(__A , (str, bytes, type(None )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ : Optional[int] = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                snake_case__ : Any = {"dtype": jnp.int64}
            else:
                snake_case__ : Tuple = {"dtype": jnp.int32}
        elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            snake_case__ : str = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case__ : Optional[Any] = np.asarray(__A )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or
        # `dill`, so we keep the device mapping in a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
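    # Hedged illustration (assumption, not part of the original file): the integer
    # dtype chosen above follows JAX's global 64-bit switch, e.g.
    #
    #   import jax
    #   jax.config.update("jax_enable_x64", True)   # integer columns become jnp.int64
    #   # with the default (False), integer columns are created as jnp.int32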
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
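# Hedged usage sketch (assumption, not part of the original file): upstream this class
# is `JaxFormatter`, and it is normally reached through the `datasets` formatting API:
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#   ds[0]["x"]  # -> a jax.Array produced by the _tensorize/_consolidate path above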
| 25 | 0 |
import os
import time
import warnings
from dataclasses import dataclass, field
from enum import Enum
from typing import List, Optional, Union
import torch
from filelock import FileLock
from torch.utils.data import Dataset
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import logging
from ..processors.glue import glue_convert_examples_to_features, glue_output_modes, glue_processors
from ..processors.utils import InputFeatures
__lowerCamelCase : List[str] = logging.get_logger(__name__)
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = field(metadata={"help": "The name of the task to train on: " + ", ".join(glue_processors.keys() )} )
a_ = field(
metadata={"help": "The input data dir. Should contain the .tsv files (or other data files) for the task."} )
a_ = field(
default=1_2_8 , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated, sequences shorter will be padded."
)
} , )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
def _lowercase ( self : List[str] ):
snake_case__ : List[Any] = self.task_name.lower()
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "train"
a_ = "dev"
a_ = "test"
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = 4_2
a_ = 4_2
a_ = 4_2
def __init__( self : str , __A : List[Any] , __A : Tuple , __A : List[str] = None , __A : List[Any] = Split.train , __A : List[Any] = None , ):
warnings.warn(
"This dataset will be removed from the library soon, preprocessing should be handled with the ๐ค Datasets "
"library. You can have a look at this example script for pointers: "
"https://github.com/huggingface/transformers/blob/main/examples/pytorch/text-classification/run_glue.py" , __lowerCAmelCase , )
snake_case__ : Dict = args
snake_case__ : List[Any] = glue_processors[args.task_name]()
snake_case__ : Optional[Any] = glue_output_modes[args.task_name]
if isinstance(__lowerCAmelCase , __lowerCAmelCase ):
try:
snake_case__ : List[Any] = Split[mode]
except KeyError:
raise KeyError("mode is not a valid split name" )
# Load data features from cache or dataset file
snake_case__ : Any = os.path.join(
cache_dir if cache_dir is not None else args.data_dir , f'''cached_{mode.value}_{tokenizer.__class__.__name__}_{args.max_seq_length}_{args.task_name}''' , )
snake_case__ : List[Any] = self.processor.get_labels()
if args.task_name in ["mnli", "mnli-mm"] and tokenizer.__class__.__name__ in (
"RobertaTokenizer",
"RobertaTokenizerFast",
"XLMRobertaTokenizer",
"BartTokenizer",
"BartTokenizerFast",
):
# HACK(label indices are swapped in RoBERTa pretrained model)
snake_case__, snake_case__ : Optional[Any] = label_list[2], label_list[1]
snake_case__ : Optional[Any] = label_list
# Make sure only the first process in distributed training processes the dataset,
# and the others will use the cache.
snake_case__ : List[Any] = cached_features_file + ".lock"
with FileLock(__lowerCAmelCase ):
if os.path.exists(__lowerCAmelCase ) and not args.overwrite_cache:
snake_case__ : Union[str, Any] = time.time()
snake_case__ : List[Any] = torch.load(__lowerCAmelCase )
logger.info(
f'''Loading features from cached file {cached_features_file} [took %.3f s]''' , time.time() - start )
else:
logger.info(f'''Creating features from dataset file at {args.data_dir}''' )
if mode == Split.dev:
snake_case__ : Optional[Any] = self.processor.get_dev_examples(args.data_dir )
elif mode == Split.test:
snake_case__ : Optional[int] = self.processor.get_test_examples(args.data_dir )
else:
snake_case__ : Optional[int] = self.processor.get_train_examples(args.data_dir )
if limit_length is not None:
snake_case__ : Union[str, Any] = examples[:limit_length]
snake_case__ : Any = glue_convert_examples_to_features(
__lowerCAmelCase , __lowerCAmelCase , max_length=args.max_seq_length , label_list=__lowerCAmelCase , output_mode=self.output_mode , )
snake_case__ : str = time.time()
torch.save(self.features , __lowerCAmelCase )
# ^ This seems to take a lot of time so I want to investigate why and how we can improve.
logger.info(
f'''Saving features into cached file {cached_features_file} [took {time.time() - start:.3f} s]''' )
def __len__( self : List[str] ):
return len(self.features )
def __getitem__( self : Any , __A : List[str] ):
return self.features[i]
def _lowercase ( self : Any ):
return self.label_list
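# Hedged usage sketch (assumption): upstream these classes are `GlueDataTrainingArguments`
# and `GlueDataset`; a typical (deprecated) invocation looks like
#
#   from transformers import AutoTokenizer
#   tok = AutoTokenizer.from_pretrained("bert-base-uncased")
#   args = GlueDataTrainingArguments(task_name="mrpc", data_dir="./glue_data/MRPC")
#   train_ds = GlueDataset(args, tokenizer=tok, mode=Split.train)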
| 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
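# Hedged illustration (assumption): with the `_LazyModule` wiring above, importing the
# package is cheap; each backend's classes are imported only on first attribute access:
#
#   from transformers.models.roberta_prelayernorm import RobertaPreLayerNormConfig
#   # torch/tf/flax model classes load lazily when first referenced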
| 25 | 0 |
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : Any , __A : Optional[Any] , __A : int=7 , __A : Any=3 , __A : Tuple=1_8 , __A : List[Any]=3_0 , __A : Union[str, Any]=4_0_0 , __A : Tuple=True , __A : int=None , __A : List[Any]=True , ):
snake_case__ : List[str] = size if size is not None else {"""height""": 1_8, """width""": 1_8}
snake_case__ : Any = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Tuple = num_channels
snake_case__ : int = image_size
snake_case__ : Optional[Any] = min_resolution
snake_case__ : int = max_resolution
snake_case__ : List[Any] = do_resize
snake_case__ : Tuple = size
snake_case__ : Union[str, Any] = do_normalize
def _lowercase ( self : Union[str, Any] ):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.8_8_6_6_4_4_3_6_3_4_0_3_3_2_0_3, 0.6_6_1_8_8_2_9_3_6_9_5_4_4_9_8_3, 0.3_8_9_1_7_4_6_4_0_1_7_8_6_8_0_4],
[-0.6_0_4_2_5_5_9_1_4_6_8_8_1_1_0_4, -0.0_2_2_9_5_0_0_8_8_6_0_5_2_8_4_6_9, 0.5_4_2_3_7_9_7_3_6_9_0_0_3_2_9_6],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase , unittest.TestCase ):
"""simple docstring"""
a_ = ImageGPTImageProcessor if is_vision_available() else None
def _lowercase ( self : int ):
snake_case__ : List[str] = ImageGPTImageProcessingTester(self )
@property
def _lowercase ( self : str ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : List[Any] ):
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(lowercase_ , "clusters" ) )
self.assertTrue(hasattr(lowercase_ , "do_resize" ) )
self.assertTrue(hasattr(lowercase_ , "size" ) )
self.assertTrue(hasattr(lowercase_ , "do_normalize" ) )
def _lowercase ( self : int ):
snake_case__ : int = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
snake_case__ : str = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
def _lowercase ( self : Tuple ):
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
snake_case__ : str = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , obj[key] ) )
else:
self.assertEqual(obj[key] , lowercase_ )
def _lowercase ( self : List[str] ):
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Optional[Any] = os.path.join(lowercase_ , "image_processor.json" )
image_processor_first.to_json_file(lowercase_ )
snake_case__ : Optional[int] = self.image_processing_class.from_json_file(lowercase_ ).to_dict()
snake_case__ : List[str] = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowercase_ )
def _lowercase ( self : Dict ):
snake_case__ : List[Any] = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(lowercase_ )
snake_case__ : Union[str, Any] = self.image_processing_class.from_pretrained(lowercase_ ).to_dict()
snake_case__ : Any = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(lowercase_ , image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] , lowercase_ )
@unittest.skip("ImageGPT requires clusters at initialization" )
def _lowercase ( self : List[Any] ):
pass
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : int = load_dataset("hf-internal-testing/fixtures_image_utils" , split="test" )
snake_case__ : Union[str, Any] = Image.open(dataset[4]["file"] )
snake_case__ : Optional[Any] = Image.open(dataset[5]["file"] )
snake_case__ : Optional[Any] = [imagea, imagea]
return images
@require_vision
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : List[str] ):
snake_case__ : Union[str, Any] = ImageGPTImageProcessor.from_pretrained("openai/imagegpt-small" )
snake_case__ : Tuple = prepare_images()
# test non-batched
snake_case__ : Optional[Any] = image_processing(images[0] , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (1, 1_0_2_4) )
snake_case__ : Union[str, Any] = [3_0_6, 1_9_1, 1_9_1]
self.assertEqual(encoding.input_ids[0, :3].tolist() , lowercase_ )
# test batched
snake_case__ : int = image_processing(lowercase_ , return_tensors="pt" )
self.assertIsInstance(encoding.input_ids , torch.LongTensor )
self.assertEqual(encoding.input_ids.shape , (2, 1_0_2_4) )
snake_case__ : Dict = [3_0_3, 1_3, 1_3]
self.assertEqual(encoding.input_ids[1, -3:].tolist() , lowercase_ )
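# Hedged sketch (assumption, not part of the original test): conceptually the processor
# color-quantizes each pixel to its nearest cluster, which is why it emits `input_ids`:
#
#   import numpy as np
#   def nearest_cluster_ids(pixels: np.ndarray, clusters: np.ndarray) -> np.ndarray:
#       # pixels: (n, 3) normalized RGB values, clusters: (k, 3) palette
#       d = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(axis=-1)
#       return d.argmin(axis=-1)  # (n,) token ids in [0, k)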
| 707 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Tuple = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def _lowercase ( self : Dict ):
snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation("gelu" )
snake_case__ : int = get_activation("gelu_10" )
snake_case__ : Optional[int] = torch_builtin(__A )
snake_case__ : Dict = geluaa(__A )
snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
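    # Hedged note (assumption): "gelu_10" is a GELU whose outputs are clipped (at 10.0
    # on the upper end, per the two assertions above), so it matches the plain GELU
    # wherever the activation stays below the cap.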
def _lowercase ( self : str ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__A ):
get_activation("bogus" )
with self.assertRaises(__A ):
get_activation(__A )
def _lowercase ( self : List[str] ):
snake_case__ : List[str] = get_activation("gelu" )
snake_case__ : Any = 1
snake_case__ : Union[str, Any] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__A ):
snake_case__ : int = acta.a
| 25 | 0 |
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 10 , snake_case_ : int = 1000 , snake_case_ : bool = True ):
assert (
        isinstance(min_val , int )
        and isinstance(max_val , int )
        and isinstance(option , bool )
), "Invalid type of value(s) specified to function!"
if min_val > max_val:
raise ValueError("Invalid value for min_val or max_val (min_value < max_value)" )
return min_val if option else max_val
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int ):
return int((number_a + number_a) / 2 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int , snake_case_ : int ):
assert (
        isinstance(lower , int ) and isinstance(higher , int ) and isinstance(to_guess , int )
    ), 'argument values must be of type "int"'
if lower > higher:
raise ValueError("argument value for lower and higher must be(lower > higher)" )
if not lower < to_guess < higher:
        raise ValueError(
            "the value to guess must lie between the lower and higher values" )
def answer(snake_case_ : int ) -> str:
if number > to_guess:
return "high"
elif number < to_guess:
return "low"
else:
return "same"
print("started..." )
snake_case__ : Optional[Any] = lower
snake_case__ : List[Any] = higher
snake_case__ : Tuple = []
while True:
snake_case__ : List[Any] = get_avg(_lowercase , _lowercase )
last_numbers.append(_lowercase )
if answer(_lowercase ) == "low":
snake_case__ : Optional[int] = number
elif answer(_lowercase ) == "high":
snake_case__ : Tuple = number
else:
break
print(F'''guess the number : {last_numbers[-1]}''' )
print(F'''details : {last_numbers!s}''' )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = int(input("Enter lower value : " ).strip() )
snake_case__ : Dict = int(input("Enter high value : " ).strip() )
snake_case__ : Optional[int] = int(input("Enter value to guess : " ).strip() )
guess_the_number(_lowercase , _lowercase , _lowercase )
if __name__ == "__main__":
main()
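# Hedged worked example (derived from the functions above): with lower=0, higher=100 and
# to_guess=37, the search visits int((0+100)/2)=50 -> "high", int((0+50)/2)=25 -> "low",
# int((25+50)/2)=37 -> "same", so last_numbers ends up as [50, 25, 37].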
| 708 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
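# Hedged illustration (not part of the original script): `set_recursively` walks a dotted
# key attribute by attribute, e.g. a key such as "encoder.layers.0.attention.k_proj" with
# weight_type="weight" is resolved via repeated getattr calls down to the k_proj module
# before the shape check and assignment above.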
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
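# Hedged usage sketch (file name and paths are placeholders): the script above is driven
# from the command line, e.g.
#
#   python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --dict_path /path/to/dict.ltr.txt \
#     --pytorch_dump_folder_path ./unispeech-sat-hf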
| 25 | 0 |
import unittest
from transformers import AutoTokenizer, is_flax_available
from transformers.testing_utils import require_flax, require_sentencepiece, require_tokenizers, slow
if is_flax_available():
import jax.numpy as jnp
from transformers import FlaxXLMRobertaModel
@require_sentencepiece
@require_tokenizers
@require_flax
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : str ):
snake_case__ : Dict = FlaxXLMRobertaModel.from_pretrained("xlm-roberta-base" )
snake_case__ : List[str] = AutoTokenizer.from_pretrained("xlm-roberta-base" )
snake_case__ : Dict = "The dog is cute and lives in the garden house"
snake_case__ : List[str] = jnp.array([tokenizer.encode(__lowerCAmelCase )] )
snake_case__ : Any = (1, 1_2, 7_6_8) # batch_size, sequence_length, embedding_vector_dim
snake_case__ : int = jnp.array(
[[-0.0_1_0_1, 0.1_2_1_8, -0.0_8_0_3, 0.0_8_0_1, 0.1_3_2_7, 0.0_7_7_6, -0.1_2_1_5, 0.2_3_8_3, 0.3_3_3_8, 0.3_1_0_6, 0.0_3_0_0, 0.0_2_5_2]] )
snake_case__ : List[Any] = model(__lowerCAmelCase )["last_hidden_state"]
self.assertEqual(output.shape , __lowerCAmelCase )
# compare the actual values for a slice of last dim
self.assertTrue(jnp.allclose(output[:, :, -1] , __lowerCAmelCase , atol=1e-3 ) )
| 709 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
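# Hedged note (not part of the original test): the fallbacks above build "nothing extra
# is masked" defaults: padding positions are dropped from attention via
# `ne(pad_token_id)`, while the all-ones head masks of shape (num_layers, num_heads)
# keep every attention head active.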
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
snake_case__, snake_case__ : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de dรฉbat sur le renseignement",
"Selon moi, il y a deux niveaux de rรฉponse de la part du gouvernement franรงais.",
"Lorsque Franรงois Hollande tรฉlรฉphone ร Barack Obama ou quand le ministre des affaires รฉtrangรจres Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils rรฉagissent ร une vraie dรฉcouverte, qui est celle de"
" l'ampleur de la surveillance amรฉricaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When Franรงois Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 25 | 0 |
import json
import os
import pickle
import shutil
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
from datasets import Dataset
from transformers import is_faiss_available
from transformers.models.bart.configuration_bart import BartConfig
from transformers.models.bart.tokenization_bart import BartTokenizer
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES as DPR_VOCAB_FILES_NAMES
from transformers.models.dpr.configuration_dpr import DPRConfig
from transformers.models.dpr.tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer
from transformers.models.rag.configuration_rag import RagConfig
from transformers.models.rag.retrieval_rag import CustomHFIndex, RagRetriever
from transformers.models.roberta.tokenization_roberta import VOCAB_FILES_NAMES as BART_VOCAB_FILES_NAMES
from transformers.testing_utils import require_faiss, require_sentencepiece, require_tokenizers, require_torch
if is_faiss_available():
import faiss
@require_faiss
class SCREAMING_SNAKE_CASE__ ( __UpperCAmelCase ):
"""simple docstring"""
def _lowercase ( self : Dict ):
snake_case__ : Optional[Any] = tempfile.mkdtemp()
snake_case__ : int = 8
# DPR tok
snake_case__ : Union[str, Any] = [
"[UNK]",
"[CLS]",
"[SEP]",
"[PAD]",
"[MASK]",
"want",
"##want",
"##ed",
"wa",
"un",
"runn",
"##ing",
",",
"low",
"lowest",
]
snake_case__ : str = os.path.join(self.tmpdirname , "dpr_tokenizer" )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
snake_case__ : Any = os.path.join(UpperCAmelCase_ , DPR_VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as vocab_writer:
vocab_writer.write("".join([x + "\n" for x in vocab_tokens] ) )
# BART tok
snake_case__ : Dict = [
"l",
"o",
"w",
"e",
"r",
"s",
"t",
"i",
"d",
"n",
"\u0120",
"\u0120l",
"\u0120n",
"\u0120lo",
"\u0120low",
"er",
"\u0120lowest",
"\u0120newer",
"\u0120wider",
"<unk>",
]
snake_case__ : Optional[int] = dict(zip(UpperCAmelCase_ , range(len(UpperCAmelCase_ ) ) ) )
snake_case__ : List[str] = ["#version: 0.2", "\u0120 l", "\u0120l o", "\u0120lo w", "e r", ""]
snake_case__ : List[str] = {"unk_token": "<unk>"}
snake_case__ : List[str] = os.path.join(self.tmpdirname , "bart_tokenizer" )
os.makedirs(UpperCAmelCase_ , exist_ok=UpperCAmelCase_ )
snake_case__ : str = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : int = os.path.join(UpperCAmelCase_ , BART_VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCAmelCase_ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(UpperCAmelCase_ ) )
def _lowercase ( self : str ):
return DPRQuestionEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def _lowercase ( self : Any ):
return DPRContextEncoderTokenizer.from_pretrained(os.path.join(self.tmpdirname , "dpr_tokenizer" ) )
def _lowercase ( self : List[str] ):
return BartTokenizer.from_pretrained(os.path.join(self.tmpdirname , "bart_tokenizer" ) )
def _lowercase ( self : str ):
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = Dataset.from_dict(
{
"id": ["0", "1"],
"text": ["foo", "bar"],
"title": ["Foo", "Bar"],
"embeddings": [np.ones(self.retrieval_vector_size ), 2 * np.ones(self.retrieval_vector_size )],
} )
dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
return dataset
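    # Hedged note (assumption): with this Flat inner-product index, a query of all ones
    # scores doc "1" (embedding 2*ones) above doc "0" (all ones), which is what the
    # retrieval assertions later in this class rely on.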
def _lowercase ( self : Union[str, Any] ):
snake_case__ : List[str] = self.get_dummy_dataset()
snake_case__ : Optional[Any] = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , )
with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
snake_case__ : Any = dataset
snake_case__ : str = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
return retriever
def _lowercase ( self : str , __A : List[str] ):
snake_case__ : Dict = self.get_dummy_dataset()
snake_case__ : int = RagConfig(
retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="custom" , )
if from_disk:
snake_case__ : List[str] = os.path.join(self.tmpdirname , "dataset" )
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , "index.faiss" )
dataset.get_index("embeddings" ).save(os.path.join(self.tmpdirname , "index.faiss" ) )
dataset.drop_index("embeddings" )
dataset.save_to_disk(os.path.join(self.tmpdirname , "dataset" ) )
del dataset
snake_case__ : Union[str, Any] = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , )
else:
snake_case__ : str = RagRetriever(
UpperCAmelCase_ , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() , index=CustomHFIndex(config.retrieval_vector_size , UpperCAmelCase_ ) , )
return retriever
    def get_dummy_legacy_index_retriever(self ):
        dataset = Dataset.from_dict(
            {
                "id": ["0", "1"],
                "text": ["foo", "bar"],
                "title": ["Foo", "Bar"],
                "embeddings": [np.ones(self.retrieval_vector_size + 1 ), 2 * np.ones(self.retrieval_vector_size + 1 )],
            } )
        dataset.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
        index_file_name = os.path.join(self.tmpdirname , "hf_bert_base.hnswSQ8_correct_phi_128.c_index" )
        dataset.save_faiss_index("embeddings" , index_file_name + ".index.dpr" )
        pickle.dump(dataset["id"] , open(index_file_name + ".index_meta.dpr" , "wb" ) )
        passages_file_name = os.path.join(self.tmpdirname , "psgs_w100.tsv.pkl" )
        passages = {sample["id"]: [sample["text"], sample["title"]] for sample in dataset}
        pickle.dump(passages , open(passages_file_name , "wb" ) )
        config = RagConfig(
            retrieval_vector_size=self.retrieval_vector_size , question_encoder=DPRConfig().to_dict() , generator=BartConfig().to_dict() , index_name="legacy" , index_path=self.tmpdirname , )
        retriever = RagRetriever(
            config , question_encoder_tokenizer=self.get_dpr_tokenizer() , generator_tokenizer=self.get_bart_tokenizer() )
        return retriever
    def test_canonical_hf_index_retriever_retrieve(self ):
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
        self.assertEqual(len(doc_dicts[0]["id"] ) , n_docs )
        self.assertEqual(doc_dicts[0]["id"][0] , "1" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_canonical_hf_index_retriever_save_and_from_pretrained(self ):
        retriever = self.get_dummy_canonical_hf_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            with patch("transformers.models.rag.retrieval_rag.load_dataset" ) as mock_load_dataset:
                mock_load_dataset.return_value = self.get_dummy_dataset()
                retriever.save_pretrained(tmp_dirname )
                retriever = RagRetriever.from_pretrained(tmp_dirname )
                self.assertIsInstance(retriever , RagRetriever )
                hidden_states = np.array(
                    [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
                out = retriever.retrieve(hidden_states , n_docs=1 )
                self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve(self ):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
        self.assertEqual(len(doc_dicts[0]["id"] ) , n_docs )
        self.assertEqual(doc_dicts[0]["id"][0] , "1" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained(self ):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
    def test_custom_hf_index_retriever_retrieve_from_disk(self ):
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["embeddings", "id", "text", "title"] )
        self.assertEqual(len(doc_dicts[0]["id"] ) , n_docs )
        self.assertEqual(doc_dicts[0]["id"][0] , "1" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["id"][0] , "0" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_custom_hf_index_retriever_save_and_from_pretrained_from_disk(self ):
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=True )
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
    def test_legacy_index_retriever_retrieve(self ):
        n_docs = 1
        retriever = self.get_dummy_legacy_index_retriever()
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        retrieved_doc_embeds, doc_ids, doc_dicts = retriever.retrieve(hidden_states , n_docs=n_docs )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertEqual(len(doc_dicts ) , 2 )
        self.assertEqual(sorted(doc_dicts[0] ) , ["text", "title"] )
        self.assertEqual(len(doc_dicts[0]["text"] ) , n_docs )
        self.assertEqual(doc_dicts[0]["text"][0] , "bar" )  # max inner product is reached with second doc
        self.assertEqual(doc_dicts[1]["text"][0] , "foo" )  # max inner product is reached with first doc
        self.assertListEqual(doc_ids.tolist() , [[1], [0]] )
    def test_legacy_index_retriever_save_and_from_pretrained(self ):
        retriever = self.get_dummy_legacy_index_retriever()
        with tempfile.TemporaryDirectory() as tmp_dirname:
            retriever.save_pretrained(tmp_dirname )
            retriever = RagRetriever.from_pretrained(tmp_dirname )
            self.assertIsInstance(retriever , RagRetriever )
            hidden_states = np.array(
                [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
            out = retriever.retrieve(hidden_states , n_docs=1 )
            self.assertTrue(out is not None )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_hf_index_retriever_call(self ):
        import torch
        n_docs = 1
        retriever = self.get_dummy_canonical_hf_index_retriever()
        question_input_ids = [[5, 7], [1_0, 1_1]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        context_input_ids, context_attention_mask, retrieved_doc_embeds = (
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , list )
        self.assertIsInstance(context_attention_mask , list )
        self.assertIsInstance(retrieved_doc_embeds , np.ndarray )
        out = retriever(
            question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs , return_tensors="pt" , )
        context_input_ids, context_attention_mask, retrieved_doc_embeds, doc_ids = (  # noqa: F841
            out["context_input_ids"],
            out["context_attention_mask"],
            out["retrieved_doc_embeds"],
            out["doc_ids"],
        )
        self.assertEqual(retrieved_doc_embeds.shape , (2, n_docs, self.retrieval_vector_size) )
        self.assertIsInstance(context_input_ids , torch.Tensor )
        self.assertIsInstance(context_attention_mask , torch.Tensor )
        self.assertIsInstance(retrieved_doc_embeds , torch.Tensor )
@require_torch
@require_tokenizers
@require_sentencepiece
    def test_custom_hf_index_end2end_retriever_call(self ):
        context_encoder_tokenizer = self.get_dpr_ctx_encoder_tokenizer()
        n_docs = 1
        retriever = self.get_dummy_custom_hf_index_retriever(from_disk=False )
        retriever.set_ctx_encoder_tokenizer(context_encoder_tokenizer )
        question_input_ids = [[5, 7], [1_0, 1_1]]
        hidden_states = np.array(
            [np.ones(self.retrieval_vector_size ), -np.ones(self.retrieval_vector_size )] , dtype=np.float32 )
        out = retriever(question_input_ids , hidden_states , prefix=retriever.config.generator.prefix , n_docs=n_docs )
        self.assertEqual(
            len(out ) , 6 )  # check whether the retriever output consists of 6 attributes including tokenized docs
        self.assertEqual(
            all(k in out for k in ("tokenized_doc_ids", "tokenized_doc_attention_mask") ) , True )  # check for doc token related keys in dictionary.
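# ---------------------------------------------------------------------------
# Standalone sketch (illustrative addition, not part of the original test
# file): the Dataset + Faiss pattern that the dummy retrievers above build on.
# It assumes `datasets`, `faiss` and `numpy` are importable, exactly as the
# tests themselves do.
if __name__ == "__main__":
    ds = Dataset.from_dict(
        {"id": ["0", "1"], "text": ["foo", "bar"], "embeddings": [np.ones(4 ), 2 * np.ones(4 )]} )
    ds.add_faiss_index("embeddings" , string_factory="Flat" , metric_type=faiss.METRIC_INNER_PRODUCT )
    # Inner-product search favours the scaled second embedding for an all-ones query.
    scores, examples = ds.get_nearest_examples("embeddings" , np.ones(4 , dtype=np.float32 ) , k=1 )
    print(scores , examples["id"] )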
| 710 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order ):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
        for row_idx, row in enumerate(partition ):
            expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
    return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # The id ints will be converted to PyArrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16 )
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(2 )
    partition_order = [1, 0]  # Reverse the partitions.
    generate_fn = _generate_iterable_examples(df , partition_order )
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , partition_order )
    for i, (row_id, row_dict) in enumerate(generate_fn() ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(10 ).repartition(1 )
    it = SparkExamplesIterable(df )
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it ):
        assert row_id == F'''0_{i}'''
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(30 ).repartition(3 )
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator" ) as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [2, 1, 0] )
        shuffled_it = SparkExamplesIterable(df ).shuffle_data_sources(generator_mock )
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it ):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(20 ).repartition(4 )
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df ).shard_data_sources(worker_id=0 , num_workers=2 )
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [0, 2] )
    for i, (row_id, row_dict) in enumerate(shard_it_1 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df ).shard_data_sources(worker_id=1 , num_workers=2 )
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df , [1, 3] )
    for i, (row_id, row_dict) in enumerate(shard_it_2 ):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
    df = spark.range(100 ).repartition(1 )
    spark_builder = Spark(df )
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1 )
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
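# Illustrative helper (added for clarity, not in the original tests): the
# round-robin shard assignment exercised above, where worker w of n takes
# partitions w, w + n, w + 2n, ..., written down directly. The helper name
# is hypothetical.
def _round_robin_shards(num_partitions , num_workers ):
    return {w: list(range(w , num_partitions , num_workers ) ) for w in range(num_workers )}

assert _round_robin_shards(4 , 2 ) == {0: [0, 2], 1: [1, 3]}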
| 25 | 0 |
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class DifferentiableProjectiveCamera:
    """Implements a batched, differentiable, standard pinhole camera."""
    origin: torch.Tensor  # [batch_size x 3]
    x: torch.Tensor  # [batch_size x 3]
    y: torch.Tensor  # [batch_size x 3]
    z: torch.Tensor  # [batch_size x 3]
    width: int
    height: int
    x_fov: float
    y_fov: float
    shape: Tuple[int]
    def __post_init__(self ):
        assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
        assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
        assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
    def resolution(self ):
        return torch.from_numpy(np.array([self.width, self.height] , dtype=np.float32 ) )
    def fov(self ):
        return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.float32 ) )
    def get_image_coords(self ):
        pixel_indices = torch.arange(self.height * self.width )
        coords = torch.stack(
            [
                pixel_indices % self.width,
                torch.div(pixel_indices , self.width , rounding_mode="trunc" ),
            ] , axis=1 , )
        return coords
    @property
    def camera_rays(self ):
        batch_size, *inner_shape = self.shape
        inner_batch_size = int(np.prod(inner_shape ) )
        coords = self.get_image_coords()
        coords = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
        rays = self.get_camera_rays(coords )
        rays = rays.view(batch_size , inner_batch_size * self.height * self.width , 2 , 3 )
        return rays
    def get_camera_rays(self , coords: torch.Tensor ):
        batch_size, *shape, n_coords = coords.shape
        assert n_coords == 2
        assert batch_size == self.origin.shape[0]
        flat = coords.view(batch_size , -1 , 2 )
        res = self.resolution()
        fov = self.fov()
        # Map pixel coordinates into [-1, 1] and scale by the field of view.
        fracs = (flat.float() / (res - 1)) * 2 - 1
        fracs = fracs * torch.tan(fov / 2 )
        fracs = fracs.view(batch_size , -1 , 2 )
        directions = (
            self.z.view(batch_size , 1 , 3 )
            + self.x.view(batch_size , 1 , 3 ) * fracs[:, :, :1]
            + self.y.view(batch_size , 1 , 3 ) * fracs[:, :, 1:]
        )
        directions = directions / directions.norm(dim=-1 , keepdim=True )
        rays = torch.stack(
            [
                torch.broadcast_to(self.origin.view(batch_size , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
                directions,
            ] , dim=2 , )
        return rays.view(batch_size , *shape , 2 , 3 )
    def resize_image(self , width: int , height: int ):
        assert width * self.height == height * self.width, "The aspect ratio should not change."
        return DifferentiableProjectiveCamera(
            origin=self.origin , x=self.x , y=self.y , z=self.z , width=width , height=height , x_fov=self.x_fov , y_fov=self.y_fov , shape=self.shape , )
def create_pan_cameras(size: int ):
    origins = []
    xs = []
    ys = []
    zs = []
    for theta in np.linspace(0 , 2 * np.pi , num=20 ):
        z = np.array([np.sin(theta ), np.cos(theta ), -0.5] )
        z /= np.sqrt(np.sum(z**2 ) )
        origin = -z * 4
        x = np.array([np.cos(theta ), -np.sin(theta ), 0.0] )
        y = np.cross(z , x )
        origins.append(origin )
        xs.append(x )
        ys.append(y )
        zs.append(z )
    return DifferentiableProjectiveCamera(
        origin=torch.from_numpy(np.stack(origins , axis=0 ) ).float() , x=torch.from_numpy(np.stack(xs , axis=0 ) ).float() , y=torch.from_numpy(np.stack(ys , axis=0 ) ).float() , z=torch.from_numpy(np.stack(zs , axis=0 ) ).float() , width=size , height=size , x_fov=0.7 , y_fov=0.7 , shape=(1, len(xs )) , )
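# Usage sketch (illustrative addition, not part of the original module):
# consuming the 20-pose pan rig created above; the shapes follow directly
# from the camera_rays property.
if __name__ == "__main__":
    cameras = create_pan_cameras(64 )
    rays = cameras.camera_rays  # [1, 20 * 64 * 64, 2, 3]
    origins, directions = rays[:, :, 0, :], rays[:, :, 1, :]
    print(origins.shape , directions.shape )  # torch.Size([1, 81920, 3]) twice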
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_xlnet"""] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_xlnet"""] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
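# Added note: with the structure above, `from transformers.models.xlnet import
# XLNetModel` does not import torch code eagerly; _LazyModule resolves the
# submodule listed in _import_structure only when the attribute is first
# accessed, while the TYPE_CHECKING branch exists purely for static analyzers.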
| 25 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
"""configuration_groupvit""": [
"""GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""GroupViTConfig""",
"""GroupViTOnnxConfig""",
"""GroupViTTextConfig""",
"""GroupViTVisionConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_groupvit"""] = [
"""GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GroupViTModel""",
"""GroupViTPreTrainedModel""",
"""GroupViTTextModel""",
"""GroupViTVisionModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_groupvit"""] = [
"""TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFGroupViTModel""",
"""TFGroupViTPreTrainedModel""",
"""TFGroupViTTextModel""",
"""TFGroupViTVisionModel""",
]
if TYPE_CHECKING:
from .configuration_groupvit import (
GROUPVIT_PRETRAINED_CONFIG_ARCHIVE_MAP,
GroupViTConfig,
GroupViTOnnxConfig,
GroupViTTextConfig,
GroupViTVisionConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_groupvit import (
GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
GroupViTModel,
GroupViTPreTrainedModel,
GroupViTTextModel,
GroupViTVisionModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_groupvit import (
TF_GROUPVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFGroupViTModel,
TFGroupViTPreTrainedModel,
TFGroupViTTextModel,
TFGroupViTVisionModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 712 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
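# Usage sketch (illustrative addition, not part of this __init__; the
# checkpoint names and exact call signature are assumptions based on the
# public Kandinsky 2.1 release, not taken from this file): the prior pipeline
# maps a prompt to image embeddings that the text-to-image pipeline decodes.
if __name__ == "__main__":
    prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior" )
    pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1" )
    prior_out = prior("a portrait of a red fox, watercolor" )
    image = pipe(
        "a portrait of a red fox, watercolor" ,
        image_embeds=prior_out.image_embeds ,
        negative_image_embeds=prior_out.negative_image_embeds ,
    ).images[0]
    image.save("fox.png" )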
| 25 | 0 |
import unittest
import numpy as np
from transformers.testing_utils import require_pytesseract, require_torch
from transformers.utils import is_pytesseract_available, is_torch_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_pytesseract_available():
from PIL import Image
    from transformers import LayoutLMv3ImageProcessor
class LayoutLMv3ImageProcessingTester(unittest.TestCase ):
    """simple docstring"""
    def __init__( self , parent , batch_size=7 , num_channels=3 , image_size=1_8 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , apply_ocr=True , ):
        size = size if size is not None else {"""height""": 1_8, """width""": 1_8}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.apply_ocr = apply_ocr
    def prepare_image_processor_dict(self ):
return {"do_resize": self.do_resize, "size": self.size, "apply_ocr": self.apply_ocr}
@require_torch
@require_pytesseract
class LayoutLMv3ImageProcessingTest(ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""
    image_processing_class = LayoutLMv3ImageProcessor if is_pytesseract_available() else None
    def setUp(self ):
        self.image_processor_tester = LayoutLMv3ImageProcessingTester(self )
    @property
    def image_processor_dict(self ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
        self.assertTrue(hasattr(image_processing , "apply_ocr" ) )
    def test_image_processor_from_dict_with_kwargs(self ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"height": 1_8, "width": 1_8} )
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict , size=4_2 )
        self.assertEqual(image_processor.size , {"height": 4_2, "width": 4_2} )
    def test_batch_feature(self ):
pass
    def test_call_pil(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoding = image_processing(image_inputs[0] , return_tensors="pt" )
        self.assertEqual(
            encoding.pixel_values.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        self.assertIsInstance(encoding.words , list )
        self.assertIsInstance(encoding.boxes , list )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_numpy(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_call_pytorch(self ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.size["height"],
                self.image_processor_tester.size["width"],
            ) , )
    def test_LayoutLMv3_integration_test(self ):
        image_processing = LayoutLMv3ImageProcessor()
        from datasets import load_dataset
        ds = load_dataset("hf-internal-testing/fixtures_docvqa" , split="test" )
        image = Image.open(ds[0]["file"] ).convert("RGB" )
        encoding = image_processing(image , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
        self.assertEqual(len(encoding.words ) , len(encoding.boxes ) )
        # fmt: off
        # the words and boxes were obtained with Tesseract 4.1.1
snake_case__ : List[str] = [["""11:14""", """to""", """11:39""", """a.m""", """11:39""", """to""", """11:44""", """a.m.""", """11:44""", """a.m.""", """to""", """12:25""", """p.m.""", """12:25""", """to""", """12:58""", """p.m.""", """12:58""", """to""", """4:00""", """p.m.""", """2:00""", """to""", """5:00""", """p.m.""", """Coffee""", """Break""", """Coffee""", """will""", """be""", """served""", """for""", """men""", """and""", """women""", """in""", """the""", """lobby""", """adjacent""", """to""", """exhibit""", """area.""", """Please""", """move""", """into""", """exhibit""", """area.""", """(Exhibits""", """Open)""", """TRRF""", """GENERAL""", """SESSION""", """(PART""", """|)""", """Presiding:""", """Lee""", """A.""", """Waller""", """TRRF""", """Vice""", """President""", """โIntroductory""", """Remarksโ""", """Lee""", """A.""", """Waller,""", """TRRF""", """Vice""", """Presi-""", """dent""", """Individual""", """Interviews""", """with""", """TRRF""", """Public""", """Board""", """Members""", """and""", """Sci-""", """entific""", """Advisory""", """Council""", """Mem-""", """bers""", """Conducted""", """by""", """TRRF""", """Treasurer""", """Philip""", """G.""", """Kuehn""", """to""", """get""", """answers""", """which""", """the""", """public""", """refrigerated""", """warehousing""", """industry""", """is""", """looking""", """for.""", """Plus""", """questions""", """from""", """the""", """floor.""", """Dr.""", """Emil""", """M.""", """Mrak,""", """University""", """of""", """Cal-""", """ifornia,""", """Chairman,""", """TRRF""", """Board;""", """Sam""", """R.""", """Cecil,""", """University""", """of""", """Georgia""", """College""", """of""", """Agriculture;""", """Dr.""", """Stanley""", """Charm,""", """Tufts""", """University""", """School""", """of""", """Medicine;""", """Dr.""", """Robert""", """H.""", """Cotton,""", """ITT""", """Continental""", """Baking""", """Company;""", """Dr.""", """Owen""", """Fennema,""", """University""", """of""", """Wis-""", """consin;""", """Dr.""", """Robert""", """E.""", """Hardenburg,""", """USDA.""", """Questions""", """and""", """Answers""", """Exhibits""", """Open""", """Capt.""", """Jack""", """Stoney""", """Room""", """TRRF""", """Scientific""", """Advisory""", """Council""", """Meeting""", """Ballroom""", """Foyer"""]] # noqa: E231
        expected_boxes = [[[1_4_1, 5_7, 2_1_4, 6_9], [2_2_8, 5_8, 2_5_2, 6_9], [1_4_1, 7_5, 2_1_6, 8_8], [2_3_0, 7_9, 2_8_0, 8_8], [1_4_2, 2_6_0, 2_1_8, 2_7_3], [2_3_0, 2_6_1, 2_5_5, 2_7_3], [1_4_3, 2_7_9, 2_1_8, 2_9_0], [2_3_1, 2_8_2, 2_9_0, 2_9_1], [1_4_3, 3_4_2, 2_1_8, 3_5_4], [2_3_1, 3_4_5, 2_8_9, 3_5_5], [2_0_2, 3_6_2, 2_2_7, 3_7_3], [1_4_3, 3_7_9, 2_2_0, 3_9_2], [2_3_1, 3_8_2, 2_9_1, 3_9_4], [1_4_4, 7_1_4, 2_2_0, 7_2_6], [2_3_1, 7_1_5, 2_5_6, 7_2_6], [1_4_4, 7_3_2, 2_2_0, 7_4_5], [2_3_2, 7_3_6, 2_9_1, 7_4_7], [1_4_4, 7_6_9, 2_1_8, 7_8_2], [2_3_1, 7_7_0, 2_5_6, 7_8_2], [1_4_1, 7_8_8, 2_0_2, 8_0_1], [2_1_5, 7_9_1, 2_7_4, 8_0_4], [1_4_3, 8_2_6, 2_0_4, 8_3_8], [2_1_5, 8_2_6, 2_4_0, 8_3_8], [1_4_2, 8_4_4, 2_0_2, 8_5_7], [2_1_5, 8_4_7, 2_7_4, 8_5_9], [3_3_4, 5_7, 4_2_7, 6_9], [4_4_0, 5_7, 5_2_2, 6_9], [3_6_9, 7_5, 4_6_1, 8_8], [4_6_9, 7_5, 5_1_6, 8_8], [5_2_8, 7_6, 5_6_2, 8_8], [5_7_0, 7_6, 6_6_7, 8_8], [6_7_5, 7_5, 7_1_1, 8_7], [7_2_1, 7_9, 7_7_8, 8_8], [7_8_9, 7_5, 8_4_0, 8_8], [3_6_9, 9_7, 4_7_0, 1_0_7], [4_8_4, 9_4, 5_0_7, 1_0_6], [5_1_8, 9_4, 5_6_2, 1_0_7], [5_7_6, 9_4, 6_5_5, 1_1_0], [6_6_8, 9_4, 7_9_2, 1_0_9], [8_0_4, 9_5, 8_2_9, 1_0_7], [3_6_9, 1_1_3, 4_6_5, 1_2_5], [4_7_7, 1_1_6, 5_4_7, 1_2_5], [5_6_2, 1_1_3, 6_5_8, 1_2_5], [6_7_1, 1_1_6, 7_4_8, 1_2_5], [7_6_1, 1_1_3, 8_1_1, 1_2_5], [3_6_9, 1_3_1, 4_6_5, 1_4_3], [4_7_7, 1_3_3, 5_4_8, 1_4_3], [5_6_3, 1_3_0, 6_9_8, 1_4_5], [7_1_0, 1_3_0, 8_0_2, 1_4_6], [3_3_6, 1_7_1, 4_1_2, 1_8_3], [4_2_3, 1_7_1, 5_7_2, 1_8_3], [5_8_2, 1_7_0, 7_1_6, 1_8_4], [7_2_8, 1_7_1, 8_1_7, 1_8_7], [8_2_9, 1_7_1, 8_4_4, 1_8_6], [3_3_8, 1_9_7, 4_8_2, 2_1_2], [5_0_7, 1_9_6, 5_5_7, 2_0_9], [5_6_9, 1_9_6, 5_9_5, 2_0_8], [6_1_0, 1_9_6, 7_0_2, 2_0_9], [5_0_5, 2_1_4, 5_8_3, 2_2_6], [5_9_5, 2_1_4, 6_5_6, 2_2_7], [6_7_0, 2_1_5, 8_0_7, 2_2_7], [3_3_5, 2_5_9, 5_4_3, 2_7_4], [5_5_6, 2_5_9, 7_0_8, 2_7_2], [3_7_2, 2_7_9, 4_2_2, 2_9_1], [4_3_5, 2_7_9, 4_6_0, 2_9_1], [4_7_4, 2_7_9, 5_7_4, 2_9_2], [5_8_7, 2_7_8, 6_6_4, 2_9_1], [6_7_6, 2_7_8, 7_3_8, 2_9_1], [7_5_1, 2_7_9, 8_3_4, 2_9_1], [3_7_2, 2_9_8, 4_3_4, 3_1_0], [3_3_5, 3_4_1, 4_8_3, 3_5_4], [4_9_7, 3_4_1, 6_5_5, 3_5_4], [6_6_7, 3_4_1, 7_2_8, 3_5_4], [7_4_0, 3_4_1, 8_2_5, 3_5_4], [3_3_5, 3_6_0, 4_3_0, 3_7_2], [4_4_2, 3_6_0, 5_3_4, 3_7_2], [5_4_5, 3_5_9, 6_8_7, 3_7_2], [6_9_7, 3_6_0, 7_5_4, 3_7_2], [7_6_5, 3_6_0, 8_2_3, 3_7_3], [3_3_4, 3_7_8, 4_2_8, 3_9_1], [4_4_0, 3_7_8, 5_7_7, 3_9_4], [5_9_0, 3_7_8, 7_0_5, 3_9_1], [7_2_0, 3_7_8, 8_0_1, 3_9_1], [3_3_4, 3_9_7, 4_0_0, 4_0_9], [3_7_0, 4_1_6, 5_2_9, 4_2_9], [5_4_4, 4_1_6, 5_7_6, 4_3_2], [5_8_7, 4_1_6, 6_6_5, 4_2_8], [6_7_7, 4_1_6, 8_1_4, 4_2_9], [3_7_2, 4_3_5, 4_5_2, 4_5_0], [4_6_5, 4_3_4, 4_9_5, 4_4_7], [5_1_1, 4_3_4, 6_0_0, 4_4_7], [6_1_1, 4_3_6, 6_3_7, 4_4_7], [6_4_9, 4_3_6, 6_9_4, 4_5_1], [7_0_5, 4_3_8, 8_2_4, 4_4_7], [3_6_9, 4_5_3, 4_5_2, 4_6_6], [4_6_4, 4_5_4, 5_0_9, 4_6_6], [5_2_2, 4_5_3, 6_1_1, 4_6_9], [6_2_5, 4_5_3, 7_9_2, 4_6_9], [3_7_0, 4_7_2, 5_5_6, 4_8_8], [5_7_0, 4_7_2, 6_8_4, 4_8_7], [6_9_7, 4_7_2, 7_1_8, 4_8_5], [7_3_2, 4_7_2, 8_3_5, 4_8_8], [3_6_9, 4_9_0, 4_1_1, 5_0_3], [4_2_5, 4_9_0, 4_8_4, 5_0_3], [4_9_6, 4_9_0, 6_3_5, 5_0_6], [6_4_5, 4_9_0, 7_0_7, 5_0_3], [7_1_8, 4_9_1, 7_6_1, 5_0_3], [7_7_1, 4_9_0, 8_4_0, 5_0_3], [3_3_6, 5_1_0, 3_7_4, 5_2_1], [3_8_8, 5_1_0, 4_4_7, 5_2_2], [4_6_0, 5_1_0, 4_8_9, 5_2_1], [5_0_3, 5_1_0, 5_8_0, 5_2_2], [5_9_2, 5_0_9, 7_3_6, 5_2_5], [7_4_5, 5_0_9, 7_7_0, 5_2_2], [7_8_1, 5_0_9, 8_4_0, 5_2_2], [3_3_8, 5_2_8, 4_3_4, 5_4_1], [4_4_8, 5_2_8, 5_9_6, 5_4_1], [6_0_9, 5_2_7, 6_8_7, 5_4_0], [7_0_0, 5_2_8, 7_9_2, 5_4_1], [3_3_6, 5_4_6, 3_9_7, 5_5_9], [4_0_7, 5_4_6, 4_3_1, 5_5_9], [4_4_3, 5_4_6, 5_2_5, 5_6_0], [5_3_7, 5_4_6, 6_8_0, 5_6_2], [6_8_8, 5_4_6, 7_1_4, 5_5_9], [7_2_2, 5_4_6, 8_3_7, 5_6_2], [3_3_6, 5_6_5, 4_4_9, 5_8_1], [4_6_1, 5_6_5, 4_8_5, 5_7_7], [4_9_7, 5_6_5, 6_6_5, 5_8_1], [6_8_1, 5_6_5, 7_1_8, 5_7_7], [7_3_2, 5_6_5, 8_3_7, 5_8_0], [3_3_7, 5_8_4, 4_3_8, 5_9_7], [4_5_2, 5_8_3, 5_2_1, 5_9_6], [5_3_5, 5_8_4, 6_7_7, 5_9_9], [6_9_0, 5_8_3, 7_8_7, 5_9_6], [8_0_1, 5_8_3, 8_2_5, 5_9_6], [3_3_8, 6_0_2, 4_7_8, 6_1_5], [4_9_2, 6_0_2, 5_3_0, 6_1_4], [5_4_3, 6_0_2, 6_3_8, 6_1_5], [6_5_0, 6_0_2, 6_7_6, 6_1_4], [6_8_8, 6_0_2, 7_8_8, 6_1_5], [8_0_2, 6_0_2, 8_4_3, 6_1_4], [3_3_7, 6_2_1, 5_0_2, 6_3_3], [5_1_6, 6_2_1, 6_1_5, 6_3_7], [6_2_9, 6_2_1, 7_7_4, 6_3_6], [7_8_9, 6_2_1, 8_2_7, 6_3_3], [3_3_7, 6_3_9, 4_1_8, 6_5_2], [4_3_2, 6_4_0, 5_7_1, 6_5_3], [5_8_7, 6_3_9, 7_3_1, 6_5_5], [7_4_3, 6_3_9, 7_6_9, 6_5_2], [7_8_0, 6_3_9, 8_4_1, 6_5_2], [3_3_8, 6_5_8, 4_4_0, 6_7_3], [4_5_5, 6_5_8, 4_9_1, 6_7_0], [5_0_8, 6_5_8, 6_0_2, 6_7_1], [6_1_6, 6_5_8, 6_3_8, 6_7_0], [6_5_4, 6_5_8, 8_3_5, 6_7_4], [3_3_7, 6_7_7, 4_2_9, 6_8_9], [3_3_7, 7_1_4, 4_8_2, 7_2_6], [4_9_5, 7_1_4, 5_4_8, 7_2_6], [5_6_1, 7_1_4, 6_8_3, 7_2_6], [3_3_8, 7_7_0, 4_6_1, 7_8_2], [4_7_4, 7_6_9, 5_5_4, 7_8_5], [4_8_9, 7_8_8, 5_6_2, 8_0_3], [5_7_6, 7_8_8, 6_4_3, 8_0_1], [6_5_6, 7_8_7, 7_5_1, 8_0_4], [7_6_4, 7_8_8, 8_4_4, 8_0_1], [3_3_4, 8_2_5, 4_2_1, 8_3_8], [4_3_0, 8_2_4, 5_7_4, 8_3_8], [5_8_4, 8_2_4, 7_2_3, 8_4_1], [3_3_5, 8_4_4, 4_5_0, 8_5_7], [4_6_4, 8_4_3, 5_8_3, 8_6_0], [6_2_8, 8_6_2, 7_5_5, 8_7_5], [7_6_9, 8_6_1, 8_4_8, 8_7_8]]]  # noqa: E231
# fmt: on
        self.assertListEqual(encoding.words , expected_words )
        self.assertListEqual(encoding.boxes , expected_boxes )
        # with apply_OCR = False
        image_processing = LayoutLMv3ImageProcessor(apply_ocr=False )
        encoding = image_processing(image , return_tensors="pt" )
        self.assertEqual(encoding.pixel_values.shape , (1, 3, 2_2_4, 2_2_4) )
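# Usage sketch (illustrative addition, not part of the test file): with
# apply_ocr=True (the default) the processor also returns Tesseract words and
# 0-1000 normalized boxes, as the integration test above verifies. The image
# path is a placeholder, and PIL/pytesseract are assumed to be installed.
if __name__ == "__main__":
    image = Image.open("document.png" ).convert("RGB" )
    processor = LayoutLMv3ImageProcessor()
    encoding = processor(image , return_tensors="pt" )
    print(encoding.pixel_values.shape )  # (1, 3, 224, 224)
    print(encoding.words[0][:5] , encoding.boxes[0][:5] )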
| 713 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict ):
    return (data["data"], data["target"])
def xgboost(features: np.ndarray , target: np.ndarray ):
    classifier = XGBClassifier()
    classifier.fit(features , target )
    return classifier
def main():
    data = load_iris()
    features, targets = data_handling(data )
    x_train, x_test, y_train, y_test = train_test_split(
        features , targets , test_size=0.25 )
    names = data["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train , y_train )
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier , x_test , y_test , display_labels=names , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
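# Illustrative addition (not part of the original script): the same helpers
# can report a plain held-out accuracy instead of a confusion matrix; the
# function name is hypothetical.
def holdout_accuracy() -> float:
    features, targets = data_handling(load_iris() )
    x_train, x_test, y_train, y_test = train_test_split(features , targets , test_size=0.25 )
    classifier = xgboost(x_train , y_train )
    return float((classifier.predict(x_test ) == y_test).mean() )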
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25 | 0 |
import copy
import inspect
import unittest
from transformers import AutoBackbone
from transformers.configuration_utils import PretrainedConfig
from transformers.testing_utils import require_timm, require_torch, torch_device
from transformers.utils.import_utils import is_torch_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
if is_torch_available():
import torch
from transformers import TimmBackbone, TimmBackboneConfig
from ...test_pipeline_mixin import PipelineTesterMixin
class TimmBackboneModelTester:
    """simple docstring"""
    def __init__( self , parent , out_indices=None , stage_names=None , out_features=None , backbone="resnet50" , batch_size=3 , image_size=3_2 , num_channels=3 , use_pretrained_backbone=True , is_training=True , ):
        self.parent = parent
        self.out_indices = out_indices if out_indices is not None else [4]
        self.stage_names = stage_names
        self.out_features = out_features
        self.backbone = backbone
        self.batch_size = batch_size
        self.image_size = image_size
        self.num_channels = num_channels
        self.use_pretrained_backbone = use_pretrained_backbone
        self.is_training = is_training
    def prepare_config_and_inputs(self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        config = self.get_config()
        return config, pixel_values
    def get_config(self ):
return TimmBackboneConfig(
image_size=self.image_size , num_channels=self.num_channels , out_features=self.out_features , out_indices=self.out_indices , stage_names=self.stage_names , use_pretrained_backbone=self.use_pretrained_backbone , backbone=self.backbone , )
    def create_and_check_model(self , config , pixel_values ):
        model = TimmBackbone(config=config )
        model.to(torch_device )
        model.eval()
        with torch.no_grad():
            result = model(pixel_values )
self.parent.assertEqual(
result.feature_map[-1].shape , (self.batch_size, model.channels[-1], 1_4, 1_4) , )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
@require_timm
class TimmBackboneModelTest(ModelTesterMixin , BackboneTesterMixin , PipelineTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (TimmBackbone,) if is_torch_available() else ()
    pipeline_model_mapping = {"""feature-extraction""": TimmBackbone} if is_torch_available() else {}
    test_resize_embeddings = False
    test_head_masking = False
    test_pruning = False
    has_attentions = False
    def setUp(self ):
        self.model_tester = TimmBackboneModelTester(self )
        self.config_tester = ConfigTester(self , config_class=TimmBackboneConfig , has_text_modality=False )
    def test_config(self ):
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
    def test_timm_transformer_backbone_equivalence(self ):
        timm_checkpoint = "resnet18"
        transformers_checkpoint = "microsoft/resnet-18"
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(len(timm_model.stage_names ) , len(transformers_model.stage_names ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
# Out indices are set to the last layer by default. For timm models, we don't know
# the number of layers in advance, so we set it to (-1,), whereas for transformers
# models, we set it to [len(stage_names) - 1] (kept for backward compatibility).
self.assertEqual(timm_model.out_indices , (-1,) )
self.assertEqual(transformers_model.out_indices , [len(timm_model.stage_names ) - 1] )
        timm_model = AutoBackbone.from_pretrained(timm_checkpoint , use_timm_backbone=True , out_indices=[1, 2, 3] )
        transformers_model = AutoBackbone.from_pretrained(transformers_checkpoint , out_indices=[1, 2, 3] )
self.assertEqual(timm_model.out_indices , transformers_model.out_indices )
self.assertEqual(len(timm_model.out_features ) , len(transformers_model.out_features ) )
self.assertEqual(timm_model.channels , transformers_model.channels )
@unittest.skip("TimmBackbone doesn't support feed forward chunking" )
    def test_feed_forward_chunking(self ):
pass
@unittest.skip("TimmBackbone doesn't have num_hidden_layers attribute" )
    def test_hidden_states_output(self ):
pass
@unittest.skip("TimmBackbone initialization is managed on the timm side" )
    def test_initialization(self ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
    def test_inputs_embeds(self ):
pass
@unittest.skip("TimmBackbone models doesn't have inputs_embeds" )
    def test_model_common_attributes(self ):
pass
@unittest.skip("TimmBackbone model cannot be created without specifying a backbone checkpoint" )
    def test_from_pretrained_no_checkpoint(self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
    def test_save_load(self ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
    def test_tie_model_weights(self ):
pass
@unittest.skip("model weights aren't tied in TimmBackbone." )
    def test_tied_model_weights_key_ignore(self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
    def test_load_save_without_tied_weights(self ):
pass
@unittest.skip("Only checkpoints on timm can be loaded into TimmBackbone" )
    def test_model_weights_reload_no_missing_tied_weights(self ):
pass
@unittest.skip("TimmBackbone doesn't have hidden size info in its configuration." )
    def test_channels(self ):
pass
@unittest.skip("TimmBackbone doesn't support output_attentions." )
    def test_torchscript_output_attentions(self ):
pass
@unittest.skip("Safetensors is not supported by timm." )
    def test_can_use_safetensors(self ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
    def test_model_is_small(self ):
pass
    def test_forward_signature(self ):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_retain_grad_hidden_states_attentions(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = self.has_attentions
        # no need to test all models as different heads yield the same functionality
        model_class = self.all_model_classes[0]
        model = model_class(config )
        model.to(torch_device )
        inputs = self._prepare_for_class(inputs_dict , model_class )
        outputs = model(**inputs )
        output = outputs[0][-1]
        # Encoder-/Decoder-only models
        hidden_states = outputs.hidden_states[0]
        hidden_states.retain_grad()
        if self.has_attentions:
            attentions = outputs.attentions[0]
            attentions.retain_grad()
        output.flatten()[0].backward(retain_graph=True )
        self.assertIsNotNone(hidden_states.grad )
        if self.has_attentions:
            self.assertIsNotNone(attentions.grad )
    def test_create_from_modified_config(self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , len(config.out_indices ) )
            self.assertEqual(len(model.channels ) , len(config.out_indices ) )
            # Check output of last stage is taken if out_features=None, out_indices=None
            modified_config = copy.deepcopy(config )
            modified_config.out_indices = None
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
            self.assertEqual(len(result.feature_maps ) , 1 )
            self.assertEqual(len(model.channels ) , 1 )
            # Check backbone can be initialized with fresh weights
            modified_config = copy.deepcopy(config )
            modified_config.use_pretrained_backbone = False
            model = model_class(modified_config )
            model.to(torch_device )
            model.eval()
            result = model(**inputs_dict )
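# Usage sketch (illustrative addition, not part of the test file): loading the
# same architecture through both routes compared in the equivalence test above;
# requires timm and network access to the checkpoints.
if __name__ == "__main__":
    timm_backbone = AutoBackbone.from_pretrained("resnet18" , use_timm_backbone=True , out_indices=[1, 2, 3] )
    hf_backbone = AutoBackbone.from_pretrained("microsoft/resnet-18" , out_indices=[1, 2, 3] )
    print(timm_backbone.channels , hf_backbone.channels )  # per-stage channel counts should match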
| 714 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset , args: Dict[str, str] ):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
    # load metric
    wer = load_metric("wer" )
    cer = load_metric("cer" )
    # compute metrics
    wer_result = wer.compute(references=result["target"] , predictions=result["prediction"] )
    cer_result = cer.compute(references=result["target"] , predictions=result["prediction"] )
    # print & log results
    result_str = F'''WER: {wer_result}\nCER: {cer_result}'''
    print(result_str )
    with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f:
        f.write(result_str )
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = F'''log_{dataset_id}_predictions.txt'''
        target_file = F'''log_{dataset_id}_targets.txt'''
        with open(pred_file , "w" ) as p, open(target_file , "w" ) as t:
            # mapping function to write output
            def write_to_file(batch , i ):
                p.write(F'''{i}''' + "\n" )
                p.write(batch["prediction"] + "\n" )
                t.write(F'''{i}''' + "\n" )
                t.write(batch["target"] + "\n" )
            result.map(write_to_file , with_indices=True )
def normalize_text(text: str ) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex , "" , text.lower() )
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t ) )
    return text
def main(args ):
    # load dataset
    dataset = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=True )
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id )
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio" , Audio(sampling_rate=sampling_rate ) )
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
    # map function to decode audio
    def map_to_pred(batch ):
        prediction = asr(
            batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"] )
        return batch
    # run inference on all examples
    result = dataset.map(map_to_pred , remove_columns=dataset.column_names )
    # compute and log_results
    # do not change function below
    log_results(result , args )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with ๐ค Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with ๐ค Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
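# Example invocation (illustrative addition; the model and dataset identifiers
# below are placeholders, not taken from this script):
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_7_0 --config en --split test --log_outputs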
| 25 | 0 |
import argparse
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
########################################################################
# This is a fully working simple example to use Accelerate,
# specifically showcasing how to properly calculate the metrics on the
# validation dataset when in a distributed system, and builds off the
# `nlp_example.py` script.
#
# This example trains a Bert base model on GLUE MRPC
# in any of the following settings (with the same script):
# - single CPU or single GPU
# - multi GPUS (using PyTorch distributed mode)
# - (multi) TPUs
# - fp16 (mixed-precision) or fp32 (normal precision)
#
# To help focus on the differences in the code, building `DataLoaders`
# was refactored into its own function.
# New additions from the base script can be found quickly by
# looking for the # New Code # tags
#
# To run it in each of these various modes, follow the instructions
# in the readme for examples:
# https://github.com/huggingface/accelerate/tree/main/examples
#
########################################################################
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32
def get_dataloaders(accelerator: Accelerator , batch_size: int = 16 ):
    tokenizer = AutoTokenizer.from_pretrained("bert-base-cased" )
    datasets = load_dataset("glue" , "mrpc" )
    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=True , max_length=None )
        return outputs
    # Apply the method we just defined to all the examples in all the splits of the dataset
    # starting with the main process first:
    with accelerator.main_process_first():
        tokenized_datasets = datasets.map(
            tokenize_function , batched=True , remove_columns=["idx", "sentence1", "sentence2"] , )
    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label" , "labels" )
    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        max_length = 128 if accelerator.distributed_type == DistributedType.TPU else None
        # When using mixed precision we want round multiples of 8/16
        if accelerator.mixed_precision == "fp8":
            pad_to_multiple_of = 16
        elif accelerator.mixed_precision != "no":
            pad_to_multiple_of = 8
        else:
            pad_to_multiple_of = None
        return tokenizer.pad(
            examples , padding="longest" , max_length=max_length , pad_to_multiple_of=pad_to_multiple_of , return_tensors="pt" , )
    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"] , shuffle=True , collate_fn=collate_fn , batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"] , shuffle=False , collate_fn=collate_fn , batch_size=batch_size )
    return train_dataloader, eval_dataloader
# For testing only
if os.environ.get("""TESTING_MOCKED_DATALOADERS""", None) == "1":
from accelerate.test_utils.training import mocked_dataloaders
    get_dataloaders = mocked_dataloaders  # noqa: F811
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Optional[Any] ):
# For testing only
if os.environ.get("TESTING_MOCKED_DATALOADERS" , _lowerCAmelCase ) == "1":
snake_case__ : List[Any] = 2
# Initialize accelerator
snake_case__ : Union[str, Any] = Accelerator(cpu=args.cpu , mixed_precision=args.mixed_precision )
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : Tuple = config["lr"]
snake_case__ : Optional[int] = int(config["num_epochs"] )
snake_case__ : Any = int(config["seed"] )
snake_case__ : Dict = int(config["batch_size"] )
snake_case__ : Tuple = evaluate.load("glue" , "mrpc" )
# If the batch size is too big we use gradient accumulation
snake_case__ : int = 1
if batch_size > MAX_GPU_BATCH_SIZE and accelerator.distributed_type != DistributedType.TPU:
snake_case__ : Optional[int] = batch_size // MAX_GPU_BATCH_SIZE
snake_case__ : int = MAX_GPU_BATCH_SIZE
set_seed(_lowerCAmelCase )
snake_case__ : Optional[Any] = get_dataloaders(_lowerCAmelCase , _lowerCAmelCase )
    # Instantiate the model (we build the model here so that the seed also controls new weight initialization)
snake_case__ : Optional[int] = AutoModelForSequenceClassification.from_pretrained("bert-base-cased" , return_dict=_lowerCAmelCase )
# We could avoid this line since the accelerator is set with `device_placement=True` (default value).
# Note that if you are placing tensors on devices manually, this line absolutely needs to be before the optimizer
# creation otherwise training will not work on TPU (`accelerate` will kindly throw an error to make us aware of that).
snake_case__ : Tuple = model.to(accelerator.device )
# Instantiate optimizer
snake_case__ : List[str] = AdamW(params=model.parameters() , lr=_lowerCAmelCase )
# Instantiate scheduler
snake_case__ : Any = get_linear_schedule_with_warmup(
optimizer=_lowerCAmelCase , num_warmup_steps=100 , num_training_steps=(len(_lowerCAmelCase ) * num_epochs) // gradient_accumulation_steps , )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__ : Optional[Any] = accelerator.prepare(
_lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase , _lowerCAmelCase )
# Now we train the model
for epoch in range(_lowerCAmelCase ):
model.train()
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
snake_case__ : Any = model(**_lowerCAmelCase )
snake_case__ : Optional[Any] = outputs.loss
snake_case__ : List[Any] = loss / gradient_accumulation_steps
accelerator.backward(_lowerCAmelCase )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
model.eval()
snake_case__ : List[str] = 0
for step, batch in enumerate(_lowerCAmelCase ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : str = model(**_lowerCAmelCase )
snake_case__ : List[str] = outputs.logits.argmax(dim=-1 )
snake_case__ : Union[str, Any] = accelerator.gather((predictions, batch["labels"]) )
# New Code #
# First we check if it's a distributed system
if accelerator.use_distributed:
# Then see if we're on the last batch of our eval dataloader
if step == len(_lowerCAmelCase ) - 1:
# Last batch needs to be truncated on distributed systems as it contains additional samples
snake_case__ : Union[str, Any] = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ : Any = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
# Otherwise we add the number of samples seen
samples_seen += references.shape[0]
# All of this can be avoided if you use `Accelerator.gather_for_metrics` instead of `Accelerator.gather`:
# accelerator.gather_for_metrics((predictions, batch["labels"]))
metric.add_batch(
predictions=_lowerCAmelCase , references=_lowerCAmelCase , )
snake_case__ : List[str] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , _lowerCAmelCase )
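# Hedged, self-contained sketch (not part of the original script) of the
# gradient-accumulation arithmetic used in the loop above: each micro-batch loss is
# divided by `gradient_accumulation_steps` so the summed gradients match one
# large-batch update. The model, data and sizes below are invented for illustration.
def _gradient_accumulation_sketch():
    import torch

    model = torch.nn.Linear(4, 1)
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1)
    data, target = torch.randn(16, 4), torch.randn(16, 1)
    gradient_accumulation_steps = 4  # effective batch = 4 micro-batches of size 4
    for step in range(gradient_accumulation_steps):
        micro_x = data[step * 4 : (step + 1) * 4]
        micro_y = target[step * 4 : (step + 1) * 4]
        loss = torch.nn.functional.mse_loss(model(micro_x), micro_y)
        (loss / gradient_accumulation_steps).backward()  # gradients accumulate in .grad
    optimizer.step()  # one optimizer update for the whole effective batch
    optimizer.zero_grad()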
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = argparse.ArgumentParser(description="Simple example of training script." )
parser.add_argument(
"--mixed_precision" , type=_lowerCAmelCase , default=_lowerCAmelCase , choices=["no", "fp16", "bf16", "fp8"] , help="Whether to use mixed precision. Choose"
"between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >= 1.10."
"and an Nvidia Ampere GPU." , )
parser.add_argument("--cpu" , action="store_true" , help="If passed, will train on the CPU." )
snake_case__ : Dict = parser.parse_args()
snake_case__ : Any = {"lr": 2E-5, "num_epochs": 3, "seed": 42, "batch_size": 16}
training_function(_lowerCAmelCase , _lowerCAmelCase )
if __name__ == "__main__":
main()
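# Hedged sketch (invented sizes) of the last-batch truncation performed in the
# evaluation loop above: distributed samplers pad the dataset so every process sees
# the same number of batches, so the final gathered batch can contain duplicate
# samples that must be dropped before computing metrics.
def _truncate_last_batch(gathered, dataset_len, samples_seen):
    # keep only the entries that belong to the real dataset
    return gathered[: dataset_len - samples_seen]

assert _truncate_last_batch(list(range(8)), dataset_len=21, samples_seen=16) == [0, 1, 2, 3, 4]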
| 715 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a_ = Features({"text": Value("string" )} )
a_ = Features({"labels": ClassLabel} )
a_ = "text"
a_ = "labels"
def _lowercase ( self : Tuple , __A : List[Any] ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
snake_case__ : Any = copy.deepcopy(self )
snake_case__ : Optional[Any] = self.label_schema.copy()
snake_case__ : List[str] = features[self.label_column]
snake_case__ : Dict = label_schema
return task_template
@property
def _lowercase ( self : Tuple ):
return {
self.text_column: "text",
self.label_column: "labels",
}
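# Hedged usage sketch (label names invented): the align step above deep-copies the
# frozen template and swaps the placeholder ClassLabel for the dataset's concrete
# one, after which label ids can be mapped back to names.
_labels = ClassLabel(names=["negative", "positive"])
assert _labels.num_classes == 2
assert _labels.int2str(1) == "positive"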
| 25 | 0 |
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
__lowerCamelCase : List[str] = logging.getLogger(__name__)
__lowerCamelCase : str = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
__lowerCamelCase : int = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = field(
default=UpperCamelCase__ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "If training from scratch, pass a model type from the list: " + ", ".join(UpperCamelCase__ )} , )
a_ = field(
default=UpperCamelCase__ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained config name or path if not the same as model_name"} )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "Pretrained tokenizer name or path if not the same as model_name"} )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "Where do you want to store the pretrained models downloaded from huggingface.co"} , )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "Whether to use one of the fast tokenizer (backed by the tokenizers library) or not."} , )
a_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
a_ = field(
default=UpperCamelCase__ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
def _lowercase ( self : Any ):
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"--config_overrides can't be used in combination with --config_name or --model_name_or_path" )
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = field(
default=UpperCamelCase__ , metadata={"help": "The name of the dataset to use (via the datasets library)."} )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
a_ = field(default=UpperCamelCase__ , metadata={"help": "The input training data file (a text file)."} )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "An optional input evaluation data file to evaluate the perplexity on (a text file)."} , )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "An optional input train ref data file for whole word masking in Chinese."} , )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "An optional input validation ref data file for whole word masking in Chinese."} , )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "Overwrite the cached training and evaluation sets"} )
a_ = field(
default=5 , metadata={
"help": "The percentage of the train set used as validation set in case there's no validation split"
} , )
a_ = field(
default=UpperCamelCase__ , metadata={
"help": (
"The maximum total input sequence length after tokenization. Sequences longer "
"than this will be truncated. Default to the max input length of the model."
)
} , )
a_ = field(
default=UpperCamelCase__ , metadata={"help": "The number of processes to use for the preprocessing."} , )
a_ = field(
default=0.15 , metadata={"help": "Ratio of tokens to mask for masked language modeling loss"} )
a_ = field(
default=UpperCamelCase__ , metadata={
"help": (
"Whether to pad all samples to `max_seq_length`. "
"If False, will pad the samples dynamically when batching to the maximum length in the batch."
)
} , )
def _lowercase ( self : List[Any] ):
if self.train_file is not None:
snake_case__ : List[Any] = self.train_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
snake_case__ : Tuple = self.validation_file.split("." )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : str ):
with open(snake_case_ , "r" , encoding="utf-8" ) as f:
snake_case__ : List[Any] = [json.loads(snake_case_ ) for line in f.read().splitlines() if (len(snake_case_ ) > 0 and not line.isspace())]
assert len(snake_case_ ) == len(snake_case_ )
snake_case__ : List[Any] = {c: dataset[c] for c in dataset.column_names}
snake_case__ : int = refs
return Dataset.from_dict(snake_case_ )
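# Hedged sketch of the ref-file layout consumed above: one JSON list per non-empty
# line, row-aligned with the dataset; the integer indices mark sub-token positions
# for whole-word masking (their exact semantics come from the preprocessing step,
# not shown here). The example rows below are invented.
_example_refs = "\n".join(json.dumps(r) for r in [[2, 3], [], [1, 4]])
_parsed = [json.loads(line) for line in _example_refs.splitlines() if len(line) > 0 and not line.isspace()]
assert _parsed == [[2, 3], [], [1, 4]]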
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[Any] = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__, snake_case__, snake_case__ : Any = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__, snake_case__, snake_case__ : Optional[Any] = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
snake_case__ : List[str] = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ : Tuple = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}''' )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("Training/evaluation parameters %s" , snake_case_ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
snake_case__ : Dict = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
snake_case__ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[:{data_args.validation_split_percentage}%]''' , )
snake_case__ : Optional[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F'''train[{data_args.validation_split_percentage}%:]''' , )
else:
snake_case__ : List[str] = {}
if data_args.train_file is not None:
snake_case__ : Any = data_args.train_file
if data_args.validation_file is not None:
snake_case__ : List[str] = data_args.validation_file
snake_case__ : Union[str, Any] = data_args.train_file.split("." )[-1]
if extension == "txt":
snake_case__ : List[Any] = "text"
snake_case__ : Any = load_dataset(snake_case_ , data_files=snake_case_ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : Optional[Any] = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.config_name , **snake_case_ )
elif model_args.model_name_or_path:
snake_case__ : List[str] = AutoConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
snake_case__ : str = CONFIG_MAPPING[model_args.model_type]()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
snake_case__ : int = {
"cache_dir": model_args.cache_dir,
"use_fast": model_args.use_fast_tokenizer,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
snake_case__ : Tuple = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **snake_case_ )
elif model_args.model_name_or_path:
snake_case__ : str = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
raise ValueError(
"You are instantiating a new tokenizer from scratch. This is not supported by this script."
"You can do it from another script, save it, and load it from here, using --tokenizer_name." )
if model_args.model_name_or_path:
snake_case__ : int = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
snake_case__ : Optional[Any] = AutoModelForMaskedLM.from_config(snake_case_ )
model.resize_token_embeddings(len(snake_case_ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
snake_case__ : Optional[int] = datasets["train"].column_names
else:
snake_case__ : Tuple = datasets["validation"].column_names
snake_case__ : Optional[Any] = "text" if "text" in column_names else column_names[0]
snake_case__ : Dict = "max_length" if data_args.pad_to_max_length else False
def tokenize_function(snake_case_ : str ):
# Remove empty lines
snake_case__ : Optional[Any] = [line for line in examples["text"] if len(snake_case_ ) > 0 and not line.isspace()]
return tokenizer(examples["text"] , padding=snake_case_ , truncation=snake_case_ , max_length=data_args.max_seq_length )
snake_case__ : Any = datasets.map(
snake_case_ , batched=snake_case_ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
    # Add the Chinese references if provided
if data_args.train_ref_file is not None:
snake_case__ : List[Any] = add_chinese_references(tokenized_datasets["train"] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
snake_case__ : List[Any] = add_chinese_references(
tokenized_datasets["validation"] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from removing them
snake_case__ : int = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
snake_case__ : Tuple = False
# Data collator
# This one will take care of randomly masking the tokens.
snake_case__ : Any = DataCollatorForWholeWordMask(tokenizer=snake_case_ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
snake_case__ : Union[str, Any] = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=tokenized_datasets["train"] if training_args.do_train else None , eval_dataset=tokenized_datasets["validation"] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
snake_case__ : Optional[Any] = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
snake_case__ : str = model_args.model_name_or_path
else:
snake_case__ : Union[str, Any] = None
snake_case__ : Tuple = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model() # Saves the tokenizer too for easy upload
snake_case__ : Any = os.path.join(training_args.output_dir , "train_results.txt" )
if trainer.is_world_process_zero():
with open(snake_case_ , "w" ) as writer:
logger.info("***** Train results *****" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , "trainer_state.json" ) )
# Evaluation
snake_case__ : Dict = {}
if training_args.do_eval:
logger.info("*** Evaluate ***" )
snake_case__ : Optional[Any] = trainer.evaluate()
snake_case__ : Any = math.exp(eval_output["eval_loss"] )
snake_case__ : Dict = perplexity
snake_case__ : Tuple = os.path.join(training_args.output_dir , "eval_results_mlm_wwm.txt" )
if trainer.is_world_process_zero():
with open(snake_case_ , "w" ) as writer:
logger.info("***** Eval results *****" )
for key, value in sorted(results.items() ):
logger.info(F''' {key} = {value}''' )
writer.write(F'''{key} = {value}\n''' )
return results
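# Hedged arithmetic check for the perplexity computed above: eval_loss is the mean
# per-token cross-entropy in nats, so perplexity is simply its exponential, exp(loss).
_eval_loss = 2.0  # hypothetical mean cross-entropy
assert abs(math.exp(_eval_loss) - 7.38905609893065) < 1e-9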
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
main()
if __name__ == "__main__":
main()
| 716 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip_vision_model"
def __init__( self : List[Any] , __A : Dict=1_4_0_8 , __A : Tuple=6_1_4_4 , __A : str=3_9 , __A : int=1_6 , __A : str=2_2_4 , __A : Any=1_4 , __A : Dict="gelu" , __A : List[Any]=1e-6 , __A : Any=0.0 , __A : List[Any]=1e-1_0 , __A : Union[str, Any]=True , **__A : Tuple , ):
super().__init__(**__A )
snake_case__ : List[str] = hidden_size
snake_case__ : Optional[int] = intermediate_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : str = patch_size
snake_case__ : int = image_size
snake_case__ : int = initializer_range
snake_case__ : Optional[int] = attention_dropout
snake_case__ : str = layer_norm_eps
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Tuple = qkv_bias
@classmethod
def _lowercase ( cls : List[str] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : str = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : Union[str, Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip_qformer"
def __init__( self : Any , __A : Union[str, Any]=3_0_5_2_2 , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=1_2 , __A : Dict=3_0_7_2 , __A : List[str]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_2 , __A : Optional[int]=0.0_2 , __A : List[str]=1e-1_2 , __A : Any=0 , __A : Optional[Any]="absolute" , __A : str=2 , __A : Any=1_4_0_8 , **__A : List[str] , ):
super().__init__(pad_token_id=__A , **__A )
snake_case__ : Dict = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = hidden_act
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : int = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : Dict = cross_attention_frequency
snake_case__ : List[str] = encoder_hidden_size
@classmethod
def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Optional[int] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : Tuple = cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : List[Any] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip"
a_ = True
def __init__( self : List[str] , __A : Optional[Any]=None , __A : Tuple=None , __A : Optional[int]=None , __A : Optional[Any]=3_2 , **__A : Optional[int] ):
super().__init__(**__A )
if vision_config is None:
snake_case__ : Any = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
snake_case__ : Optional[Any] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
snake_case__ : Optional[int] = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
snake_case__ : List[Any] = InstructBlipVisionConfig(**__A )
snake_case__ : Union[str, Any] = InstructBlipQFormerConfig(**__A )
snake_case__ : Dict = text_config["model_type"] if "model_type" in text_config else "opt"
snake_case__ : List[Any] = CONFIG_MAPPING[text_model_type](**__A )
snake_case__ : Union[str, Any] = self.text_config.tie_word_embeddings
snake_case__ : Tuple = self.text_config.is_encoder_decoder
snake_case__ : str = num_query_tokens
snake_case__ : Dict = self.vision_config.hidden_size
snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case__ : int = 1.0
snake_case__ : Optional[int] = 0.0_2
@classmethod
def _lowercase ( cls : List[str] , __A : InstructBlipVisionConfig , __A : InstructBlipQFormerConfig , __A : PretrainedConfig , **__A : int , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = copy.deepcopy(self.__dict__ )
snake_case__ : Optional[Any] = self.vision_config.to_dict()
snake_case__ : List[str] = self.qformer_config.to_dict()
snake_case__ : List[Any] = self.text_config.to_dict()
snake_case__ : List[Any] = self.__class__.model_type
return output
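# Hedged usage sketch, kept as comments because this snippet is itself part of the
# transformers config machinery (the class names below appear in the snippet's own
# bodies, so they are grounded; the round-trip claim mirrors to_dict above):
# from transformers import InstructBlipConfig
# config = InstructBlipConfig()          # empty vision/qformer dicts, OPT text config
# assert config.to_dict()["model_type"] == "instructblip"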
| 25 | 0 |
from __future__ import annotations
from typing import Generic, TypeVar
__lowerCamelCase : int = TypeVar("""T""")
class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : T ):
snake_case__ : int = data
snake_case__ : str = self
snake_case__ : str = 0
class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : Any ):
snake_case__ : dict[T, DisjointSetTreeNode[T]] = {}
def _lowercase ( self : Union[str, Any] , __A : T ):
snake_case__ : List[Any] = DisjointSetTreeNode(lowerCamelCase__ )
def _lowercase ( self : int , __A : T ):
snake_case__ : Any = self.map[data]
if elem_ref != elem_ref.parent:
snake_case__ : List[str] = self.find_set(elem_ref.parent.data )
return elem_ref.parent
def _lowercase ( self : List[str] , __A : DisjointSetTreeNode[T] , __A : DisjointSetTreeNode[T] ):
if nodea.rank > nodea.rank:
snake_case__ : Dict = nodea
else:
snake_case__ : Union[str, Any] = nodea
if nodea.rank == nodea.rank:
nodea.rank += 1
def _lowercase ( self : Optional[Any] , __A : T , __A : T ):
self.link(self.find_set(lowerCamelCase__ ) , self.find_set(lowerCamelCase__ ) )
class SCREAMING_SNAKE_CASE__ ( Generic[T] ):
"""simple docstring"""
def __init__( self : int ):
snake_case__ : dict[T, dict[T, int]] = {}
def _lowercase ( self : Union[str, Any] , __A : T ):
if node not in self.connections:
snake_case__ : List[str] = {}
def _lowercase ( self : List[str] , __A : T , __A : T , __A : int ):
self.add_node(lowerCamelCase__ )
self.add_node(lowerCamelCase__ )
snake_case__ : Dict = weight
snake_case__ : str = weight
def _lowercase ( self : List[Any] ):
snake_case__ : str = []
snake_case__ : Any = set()
for start in self.connections:
for end in self.connections[start]:
if (start, end) not in seen:
seen.add((end, start) )
edges.append((start, end, self.connections[start][end]) )
edges.sort(key=lambda __A : x[2] )
# creating the disjoint set
snake_case__ : Tuple = DisjointSetTree[T]()
for node in self.connections:
disjoint_set.make_set(lowerCamelCase__ )
# MST generation
snake_case__ : List[Any] = 0
snake_case__ : Tuple = 0
snake_case__ : str = GraphUndirectedWeighted[T]()
while num_edges < len(self.connections ) - 1:
snake_case__ : Union[str, Any] = edges[index]
index += 1
snake_case__ : Union[str, Any] = disjoint_set.find_set(lowerCamelCase__ )
snake_case__ : int = disjoint_set.find_set(lowerCamelCase__ )
if parent_u != parent_v:
num_edges += 1
graph.add_edge(lowerCamelCase__ , lowerCamelCase__ , lowerCamelCase__ )
disjoint_set.union(lowerCamelCase__ , lowerCamelCase__ )
return graph
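# Hedged, self-contained mini-Kruskal (invented 3-node graph) mirroring the MST
# construction above: sort edges by weight, join endpoints only when their disjoint
# sets differ, and stop after V - 1 edges.
def _kruskal_sketch():
    parent: dict[str, str] = {}

    def find(x: str) -> str:
        parent.setdefault(x, x)
        if parent[x] != x:
            parent[x] = find(parent[x])  # path compression, as in find_set above
        return parent[x]

    edges = sorted([("a", "b", 1), ("b", "c", 2), ("a", "c", 3)], key=lambda e: e[2])
    mst = []
    for u, v, w in edges:
        if find(u) != find(v):
            parent[find(u)] = find(v)  # union (rank bookkeeping omitted for brevity)
            mst.append((u, v, w))
    return mst

assert _kruskal_sketch() == [("a", "b", 1), ("b", "c", 2)]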
| 717 |
def SCREAMING_SNAKE_CASE ( snake_case_ : list ):
if len(snake_case_ ) <= 1:
return lst
snake_case__ : List[Any] = 1
while i < len(snake_case_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
snake_case__, snake_case__ : Tuple = lst[i], lst[i - 1]
i -= 1
if i == 0:
snake_case__ : Union[str, Any] = 1
return lst
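# Hedged worked trace of the gnome-sort behaviour implemented above, with a minimal
# reference version: [3, 1, 2] -> swap (3,1) -> [1, 3, 2] -> swap (3,2) -> [1, 2, 3];
# the index steps back after each swap and forward when the pair is ordered.
def _gnome_sort_sketch(lst):
    i = 1
    while i < len(lst):
        if i == 0 or lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]  # swap and step back
            i -= 1
    return lst

assert _gnome_sort_sketch([3, 1, 2]) == [1, 2, 3]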
if __name__ == "__main__":
__lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 25 | 0 |
import os
from math import logaa
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] = "base_exp.txt" ):
snake_case__ : Tuple = 0
snake_case__ : str = 0
for i, line in enumerate(open(os.path.join(os.path.dirname(lowercase__ ) , lowercase__ ) ) ):
snake_case__, snake_case__ : Tuple = list(map(lowercase__ , line.split("," ) ) )
if x * logaa(lowercase__ ) > largest:
snake_case__ : int = x * logaa(lowercase__ )
snake_case__ : Union[str, Any] = i + 1
return result
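# Hedged check of the logarithm trick used above: log10(b**e) = e * log10(b), so
# huge powers can be compared by their logs without materialising the integers.
from math import log10 as _log10

assert (7 * _log10(3) > 11 * _log10(2)) == (3**7 > 2**11)  # 2187 > 2048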
if __name__ == "__main__":
print(solution())
| 718 |
from __future__ import annotations
import time
__lowerCamelCase : str = list[tuple[int, int]]
__lowerCamelCase : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : int , __A : int , __A : int , __A : int , __A : Node | None ):
snake_case__ : Optional[int] = pos_x
snake_case__ : Dict = pos_y
snake_case__ : int = (pos_y, pos_x)
snake_case__ : Optional[int] = goal_x
snake_case__ : Tuple = goal_y
snake_case__ : str = parent
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : tuple[int, int] , __A : tuple[int, int] ):
snake_case__ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , __A )
snake_case__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __A )
snake_case__ : int = [self.start]
snake_case__ : Union[str, Any] = False
def _lowercase ( self : Dict ):
while self.node_queue:
snake_case__ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case__ : Optional[Any] = True
return self.retrace_path(__A )
snake_case__ : int = self.get_successors(__A )
for node in successors:
self.node_queue.append(__A )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Union[str, Any] , __A : Node ):
snake_case__ : str = []
for action in delta:
snake_case__ : str = parent.pos_x + action[1]
snake_case__ : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__A , __A , self.target.pos_y , self.target.pos_x , __A ) )
return successors
def _lowercase ( self : Optional[Any] , __A : Node | None ):
snake_case__ : Tuple = node
snake_case__ : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case__ : Tuple = current_node.parent
path.reverse()
return path
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Dict , __A : str , __A : int ):
snake_case__ : str = BreadthFirstSearch(__A , __A )
snake_case__ : int = BreadthFirstSearch(__A , __A )
snake_case__ : Tuple = False
def _lowercase ( self : Optional[Any] ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case__ : Any = self.fwd_bfs.node_queue.pop(0 )
snake_case__ : List[str] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case__ : List[str] = True
return self.retrace_bidirectional_path(
__A , __A )
snake_case__ : Union[str, Any] = current_bwd_node
snake_case__ : Dict = current_fwd_node
snake_case__ : List[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(__A ),
self.bwd_bfs: self.bwd_bfs.get_successors(__A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Any , __A : Node , __A : Node ):
snake_case__ : List[str] = self.fwd_bfs.retrace_path(__A )
snake_case__ : Optional[Any] = self.bwd_bfs.retrace_path(__A )
bwd_path.pop()
bwd_path.reverse()
snake_case__ : List[Any] = fwd_path + bwd_path
return path
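# Hedged complexity note with a toy check: over a branching factor b and depth d,
# one-directional BFS expands O(b**d) nodes while two frontiers meeting in the
# middle expand O(2 * b**(d // 2)); for b = 4, d = 6 that is 4096 vs 128.
assert 4**6 == 4096 and 2 * 4 ** (6 // 2) == 128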
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowerCamelCase : str = (0, 0)
__lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase : Any = time.time()
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal)
__lowerCamelCase : str = bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
__lowerCamelCase : Optional[Any] = time.time()
__lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
__lowerCamelCase : str = bd_bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 25 | 0 |
import tempfile
import unittest
from transformers import SPIECE_UNDERLINE, BatchEncoding, PLBartTokenizer, is_torch_available
from transformers.testing_utils import (
get_tests_dir,
nested_simplify,
require_sentencepiece,
require_tokenizers,
require_torch,
)
from ...test_tokenization_common import TokenizerTesterMixin
__lowerCamelCase : Any = get_tests_dir("""fixtures/test_sentencepiece.model""")
if is_torch_available():
from transformers.models.plbart.modeling_plbart import shift_tokens_right
__lowerCamelCase : str = 5_0003
__lowerCamelCase : int = 5_0002
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( __lowercase , unittest.TestCase ):
"""simple docstring"""
a_ = PLBartTokenizer
a_ = None
a_ = False
def _lowercase ( self : int ):
super().setUp()
# We have a SentencePiece fixture for testing
snake_case__ : List[Any] = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a )
tokenizer.save_pretrained(self.tmpdirname )
def _lowercase ( self : int ):
snake_case__ : Optional[Any] = PLBartTokenizer(__a , language_codes="base" , keep_accents=__a )
snake_case__ : List[str] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["โThis", "โis", "โa", "โt", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        snake_case__ : Optional[Any] = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"รฉ",
".",
] , )
snake_case__ : str = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case__ : Dict = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
snake_case__ : List[Any] = tokenizer.vocab_size
snake_case__ : str = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 4 , __a )]
self.assertListEqual(__a , ["__java__", "__python__", "__en_XX__", "<mask>"] )
snake_case__ : Optional[Any] = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
snake_case__ : Optional[int] = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
def _lowercase ( self : Optional[int] ):
snake_case__ : Optional[Any] = PLBartTokenizer(__a , language_codes="multi" , keep_accents=__a )
snake_case__ : Optional[int] = tokenizer.tokenize("This is a test" )
self.assertListEqual(__a , ["โThis", "โis", "โa", "โt", "est"] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(__a ) , [value + tokenizer.fairseq_offset for value in [2_8_5, 4_6, 1_0, 1_7_0, 3_8_2]] , )
        snake_case__ : int = tokenizer.tokenize("I was born in 92000, and this is falsé." )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"9",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"รฉ",
".",
] , )
snake_case__ : Optional[Any] = tokenizer.convert_tokens_to_ids(__a )
self.assertListEqual(
__a , [
value + tokenizer.fairseq_offset
for value in [8, 2_1, 8_4, 5_5, 2_4, 1_9, 7, 2, 6_0_2, 3_4_7, 3_4_7, 3_4_7, 3, 1_2, 6_6, 4_6, 7_2, 8_0, 6, 2, 4]
] , )
snake_case__ : str = tokenizer.convert_ids_to_tokens(__a )
self.assertListEqual(
__a , [
SPIECE_UNDERLINE + "I",
SPIECE_UNDERLINE + "was",
SPIECE_UNDERLINE + "b",
"or",
"n",
SPIECE_UNDERLINE + "in",
SPIECE_UNDERLINE + "",
"<unk>",
"2",
"0",
"0",
"0",
",",
SPIECE_UNDERLINE + "and",
SPIECE_UNDERLINE + "this",
SPIECE_UNDERLINE + "is",
SPIECE_UNDERLINE + "f",
"al",
"s",
"<unk>",
".",
] , )
snake_case__ : Tuple = tokenizer.vocab_size
snake_case__ : int = [tokenizer.convert_ids_to_tokens(__a ) for x in range(end - 7 , __a )]
self.assertListEqual(
__a , ["__java__", "__python__", "__en_XX__", "__javascript__", "__php__", "__ruby__", "__go__"] )
snake_case__ : Tuple = """java.lang.Exception, python.lang.Exception, javascript, php, ruby, go"""
snake_case__ : List[Any] = tokenizer(__a ).input_ids
self.assertEqual(
tokenizer.decode(__a , skip_special_tokens=__a , clean_up_tokenization_spaces=__a ) , __a , )
@require_torch
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a_ = '''uclanlp/plbart-python-en_XX'''
a_ = [
'''def maximum(a,b,c):NEW_LINE_INDENTreturn max([a,b,c])''',
'''def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])''',
]
a_ = [
'''Returns the maximum value of a b c.''',
'''Sums the values of a b c.''',
]
a_ = [
1_3_4,
5_4_5_2,
3_3_4_6_0,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
9_8_8,
2_0,
3_3_4_5_6,
1_9,
3_3_4_5_6,
7_7_1,
3_9,
4_2_5_8,
8_8_9,
3_3_1_8,
3_3_4_4_1,
3_3_4_6_3,
3_3_4_6_5,
3_3_4_6_3,
3_3_4_4_9,
2_4_7_1,
2,
PYTHON_CODE,
]
@classmethod
def _lowercase ( cls : Any ):
snake_case__ : PLBartTokenizer = PLBartTokenizer.from_pretrained(
cls.checkpoint_name , language_codes="base" , src_lang="python" , tgt_lang="en_XX" )
snake_case__ : int = 1
return cls
def _lowercase ( self : Tuple ):
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__java__"] , 5_0_0_0_1 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__python__"] , 5_0_0_0_2 )
self.assertEqual(self.tokenizer.fairseq_tokens_to_ids["__en_XX__"] , 5_0_0_0_3 )
def _lowercase ( self : List[str] ):
snake_case__ : Any = self.tokenizer.batch_encode_plus(self.src_text ).input_ids[0]
self.assertListEqual(self.expected_src_tokens , __a )
def _lowercase ( self : Optional[int] ):
self.assertIn(__a , self.tokenizer.all_special_ids )
snake_case__ : List[str] = [EN_CODE, 9_0_3_7, 3_3_4_4_2, 5_7, 7_5_2, 1_5_3, 1_4, 5_6, 1_8, 9, 2]
snake_case__ : int = self.tokenizer.decode(__a , skip_special_tokens=__a )
snake_case__ : List[str] = self.tokenizer.decode(generated_ids[1:] , skip_special_tokens=__a )
self.assertEqual(__a , __a )
self.assertNotIn(self.tokenizer.eos_token , __a )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : List[Any] = ["""def sum(a,b,c):NEW_LINE_INDENTreturn sum([a,b,c])""" * 2_0]
self.assertIsInstance(src_text[0] , __a )
snake_case__ : Dict = 1_0
snake_case__ : str = self.tokenizer(__a , max_length=__a , truncation=__a ).input_ids[0]
self.assertEqual(ids[-2] , 2 )
self.assertEqual(ids[-1] , __a )
self.assertEqual(len(__a ) , __a )
def _lowercase ( self : Optional[Any] ):
self.assertListEqual(self.tokenizer.convert_tokens_to_ids(["<mask>", "__java__"] ) , [5_0_0_0_4, 5_0_0_0_1] )
def _lowercase ( self : int ):
snake_case__ : Optional[Any] = tempfile.mkdtemp()
snake_case__ : Optional[Any] = self.tokenizer.fairseq_tokens_to_ids
self.tokenizer.save_pretrained(__a )
snake_case__ : str = PLBartTokenizer.from_pretrained(__a )
self.assertDictEqual(new_tok.fairseq_tokens_to_ids , __a )
@require_torch
def _lowercase ( self : Optional[Any] ):
snake_case__ : Optional[int] = self.tokenizer(self.src_text , text_target=self.tgt_text , padding=__a , return_tensors="pt" )
snake_case__ : int = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
# fairseq batch: https://gist.github.com/sshleifer/cba08bc2109361a74ac3760a7e30e4f4
self.assertEqual(batch.input_ids[1][-2:].tolist() , [2, PYTHON_CODE] )
self.assertEqual(batch.decoder_input_ids[1][0] , __a )
self.assertEqual(batch.decoder_input_ids[1][-1] , 2 )
self.assertEqual(batch.labels[1][-2:].tolist() , [2, EN_CODE] )
@require_torch
def _lowercase ( self : Optional[int] ):
snake_case__ : Tuple = self.tokenizer(
self.src_text , text_target=self.tgt_text , padding=__a , truncation=__a , max_length=len(self.expected_src_tokens ) , return_tensors="pt" , )
snake_case__ : Tuple = shift_tokens_right(batch["labels"] , self.tokenizer.pad_token_id )
self.assertIsInstance(__a , __a )
self.assertEqual((2, 2_6) , batch.input_ids.shape )
self.assertEqual((2, 2_6) , batch.attention_mask.shape )
snake_case__ : List[Any] = batch.input_ids.tolist()[0]
self.assertListEqual(self.expected_src_tokens , __a )
self.assertEqual(2 , batch.decoder_input_ids[0, -1] ) # EOS
# Test that special tokens are reset
self.assertEqual(self.tokenizer.prefix_tokens , [] )
self.assertEqual(self.tokenizer.suffix_tokens , [self.tokenizer.eos_token_id, PYTHON_CODE] )
def _lowercase ( self : Optional[int] ):
snake_case__ : Dict = self.tokenizer(self.src_text , padding=__a , truncation=__a , max_length=3 , return_tensors="pt" )
snake_case__ : List[str] = self.tokenizer(
text_target=self.tgt_text , padding=__a , truncation=__a , max_length=1_0 , return_tensors="pt" )
snake_case__ : Optional[int] = targets["""input_ids"""]
snake_case__ : int = shift_tokens_right(__a , self.tokenizer.pad_token_id )
self.assertEqual(batch.input_ids.shape[1] , 3 )
self.assertEqual(batch.decoder_input_ids.shape[1] , 1_0 )
@require_torch
def _lowercase ( self : List[Any] ):
snake_case__ : List[str] = self.tokenizer._build_translation_inputs(
"A test" , return_tensors="pt" , src_lang="en_XX" , tgt_lang="java" )
self.assertEqual(
nested_simplify(__a ) , {
# A, test, EOS, en_XX
"input_ids": [[1_5_0, 2_4_2, 2, 5_0_0_0_3]],
"attention_mask": [[1, 1, 1, 1]],
# java
"forced_bos_token_id": 5_0_0_0_1,
} , )
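# Hedged sketch of the PLBart id layout exercised above (the ids are taken from the
# translation-inputs test, not invented): source ids end with [eos, lang_code], and
# shift_tokens_right rotates the language code to the front so it acts as the
# decoder start token.
_src_ids = [150, 242, 2, 50003]  # "A test" + eos + __en_XX__
assert _src_ids[-2:] == [2, 50003]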
| 719 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : str = min_resolution
snake_case__ : Tuple = max_resolution
snake_case__ : List[Any] = do_resize
snake_case__ : Dict = size
snake_case__ : List[str] = do_normalize
snake_case__ : Optional[int] = image_mean
snake_case__ : Optional[int] = image_std
snake_case__ : Any = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : int = do_pad
def _lowercase ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ):
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : Tuple = image.size
else:
snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case__ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case__ : List[Any] = self.size["shortest_edge"]
snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Dict = self.size["shortest_edge"]
snake_case__ : Dict = self.size["shortest_edge"]
else:
snake_case__ : str = []
for image in image_inputs:
snake_case__, snake_case__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
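# Hedged, standalone restatement of the shortest-edge resize rule the helper above
# mirrors: the smaller side is scaled to `shortest_edge` and the other side keeps
# the aspect ratio (sizes below are invented).
def _shortest_edge_resize(h, w, shortest_edge=18):
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge

assert _shortest_edge_resize(400, 200) == (36, 18)
assert _shortest_edge_resize(200, 400) == (18, 36)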
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : int ):
snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self )
@property
def _lowercase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Any ):
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : List[str] ):
snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : List[Any] ):
# prepare image and target
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : str ):
# prepare image, target and masks_path
snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 0 |
import functools
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str ):
snake_case__ : Any = len(_UpperCamelCase )
snake_case__ : Any = len(_UpperCamelCase )
@functools.cache
def min_distance(snake_case_ : int , snake_case_ : int ) -> int:
        # if the first word is exhausted - delete all remaining characters from the second word
if indexa >= len_worda:
return len_worda - indexa
        # if the second word is exhausted - delete all remaining characters from the first word
if indexa >= len_worda:
return len_worda - indexa
snake_case__ : str = int(worda[indexa] != worda[indexa] ) # current letters not identical
return min(
1 + min_distance(indexa + 1 , _UpperCamelCase ) , 1 + min_distance(_UpperCamelCase , indexa + 1 ) , diff + min_distance(indexa + 1 , indexa + 1 ) , )
return min_distance(0 , 0 )
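# Hedged worked example of the cached recurrence above: "kitten" -> "sitting"
# requires 3 edits (k->s, e->i, insert g); states are keyed by (index1, index2).
@functools.cache
def _lev(i: int, j: int, a: str = "kitten", b: str = "sitting") -> int:
    if i == len(a):
        return len(b) - j
    if j == len(b):
        return len(a) - i
    return min(1 + _lev(i + 1, j), 1 + _lev(i, j + 1), int(a[i] != b[j]) + _lev(i + 1, j + 1))

assert _lev(0, 0) == 3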
if __name__ == "__main__":
import doctest
doctest.testmod()
| 720 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCamelCase : str = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback-Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve( datasets.Metric ):
"""simple docstring"""
    def _info( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
    def _compute( self : Union[str, Any] , predictions , references , p_features=None , q_features=None , p_tokens=None , q_tokens=None , num_buckets="auto" , pca_max_data=-1 , kmeans_explained_var=0.9 , kmeans_num_redo=5 , kmeans_max_iter=5_0_0 , featurize_model_name="gpt2-large" , device_id=-1 , max_text_length=1_0_2_4 , divergence_curve_discretization_size=2_5 , mauve_scaling_factor=5 , verbose=True , seed=2_5 , ):
        out = compute_mauve(
            p_text=predictions , q_text=references , p_features=p_features , q_features=q_features , p_tokens=p_tokens , q_tokens=q_tokens , num_buckets=num_buckets , pca_max_data=pca_max_data , kmeans_explained_var=kmeans_explained_var , kmeans_num_redo=kmeans_num_redo , kmeans_max_iter=kmeans_max_iter , featurize_model_name=featurize_model_name , device_id=device_id , max_text_length=max_text_length , divergence_curve_discretization_size=divergence_curve_discretization_size , mauve_scaling_factor=mauve_scaling_factor , verbose=verbose , seed=seed , )
        return out
| 25 | 0 |
import functools
def SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : list[int] ):
# Validation
if not isinstance(snake_case_ , snake_case_ ) or not all(isinstance(snake_case_ , snake_case_ ) for day in days ):
raise ValueError("The parameter days should be a list of integers" )
if len(snake_case_ ) != 3 or not all(isinstance(snake_case_ , snake_case_ ) for cost in costs ):
raise ValueError("The parameter costs should be a list of three integers" )
if len(snake_case_ ) == 0:
return 0
if min(snake_case_ ) <= 0:
raise ValueError("All days elements should be greater than 0" )
if max(snake_case_ ) >= 366:
raise ValueError("All days elements should be less than 366" )
snake_case__ : List[Any] = set(snake_case_ )
@functools.cache
def dynamic_programming(snake_case_ : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
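    # Hedged usage sketch (travel days and pass costs assumed for illustration,
    # in the style of LeetCode 983 "Minimum Cost For Tickets"):
    print(SCREAMING_SNAKE_CASE([1, 4, 6, 7, 8, 20] , [2, 7, 15] ))  # expected: 11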
| 721 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
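# Illustrative usage of the public API re-exported above (dataset name assumed
# purely for the example, not part of this package's source):
#   from datasets import load_dataset
#   ds = load_dataset("glue", "mrpc", split="train")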
| 25 | 0 |
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SwiftFormerConfig,
SwiftFormerForImageClassification,
ViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : List[Any] = logging.get_logger(__name__)
__lowerCamelCase : str = torch.device("""cpu""")
def prepare_img ( ):
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
def get_expected_output ( swiftformer_name : str ):
    if swiftformer_name == "swiftformer_xs":
        return torch.tensor([-2.1703E00, 2.1107E00, -2.0811E00, 8.8685E-01, 2.4360E-01] )
    elif swiftformer_name == "swiftformer_s":
        return torch.tensor([3.9636E-01, 2.3478E-01, -1.6963E00, -1.7381E00, -8.6337E-01] )
    elif swiftformer_name == "swiftformer_l1":
        return torch.tensor([-4.2768E-01, -4.7429E-01, -1.0897E00, -1.0248E00, 3.5523E-02] )
    elif swiftformer_name == "swiftformer_l3":
        return torch.tensor([-2.5330E-01, 2.4211E-01, -6.0185E-01, -8.2789E-01, -6.0446E-02] )
def rename_key ( dct : dict , old : str , new : str ):
    val = dct.pop(old )
    dct[new] = val
def create_rename_keys ( state_dict : dict ):
    rename_keys = []
    for k in state_dict.keys():
        k_new = k
        if ".pwconv" in k:
            k_new = k_new.replace(".pwconv" , ".point_wise_conv" )
        if ".dwconv" in k:
            k_new = k_new.replace(".dwconv" , ".depth_wise_conv" )
        if ".Proj." in k:
            k_new = k_new.replace(".Proj." , ".proj." )
        if "patch_embed" in k_new:
            k_new = k_new.replace("patch_embed" , "swiftformer.patch_embed.patch_embedding" )
        if "network" in k_new:
            ls = k_new.split("." )
            if ls[2].isdigit():
                k_new = "swiftformer.encoder.network." + ls[1] + ".blocks." + ls[2] + "." + ".".join(ls[3:] )
            else:
                k_new = k_new.replace("network" , "swiftformer.encoder.network" )
        rename_keys.append((k, k_new) )
    return rename_keys
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : int , snake_case_ : List[str] ):
snake_case__ : int = SwiftFormerConfig()
# dataset (ImageNet-21k only or also fine-tuned on ImageNet 2012), patch_size and image_size
snake_case__ : Optional[Any] = 1000
snake_case__ : Optional[Any] = "huggingface/label-files"
snake_case__ : int = "imagenet-1k-id2label.json"
snake_case__ : List[Any] = json.load(open(hf_hub_download(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , repo_type="dataset" ) , "r" ) )
snake_case__ : int = {int(_SCREAMING_SNAKE_CASE ): v for k, v in idalabel.items()}
snake_case__ : Dict = idalabel
snake_case__ : Optional[Any] = {v: k for k, v in idalabel.items()}
# size of the architecture
if swiftformer_name == "swiftformer_xs":
snake_case__ : List[str] = [3, 3, 6, 4]
snake_case__ : Optional[Any] = [48, 56, 112, 220]
elif swiftformer_name == "swiftformer_s":
snake_case__ : Union[str, Any] = [3, 3, 9, 6]
snake_case__ : Any = [48, 64, 168, 224]
elif swiftformer_name == "swiftformer_l1":
snake_case__ : Optional[Any] = [4, 3, 10, 5]
snake_case__ : Any = [48, 96, 192, 384]
elif swiftformer_name == "swiftformer_l3":
snake_case__ : str = [4, 4, 12, 6]
snake_case__ : str = [64, 128, 320, 512]
# load state_dict of original model, remove and rename some keys
if original_ckpt:
if original_ckpt.startswith("https" ):
snake_case__ : Tuple = torch.hub.load_state_dict_from_url(_SCREAMING_SNAKE_CASE , map_location="cpu" , check_hash=_SCREAMING_SNAKE_CASE )
else:
snake_case__ : List[str] = torch.load(_SCREAMING_SNAKE_CASE , map_location="cpu" )
snake_case__ : List[str] = checkpoint
snake_case__ : Dict = create_rename_keys(_SCREAMING_SNAKE_CASE )
for rename_key_src, rename_key_dest in rename_keys:
rename_key(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE )
# load HuggingFace model
snake_case__ : str = SwiftFormerForImageClassification(_SCREAMING_SNAKE_CASE ).eval()
hf_model.load_state_dict(_SCREAMING_SNAKE_CASE )
# prepare test inputs
snake_case__ : Optional[Any] = prepare_img()
snake_case__ : List[Any] = ViTImageProcessor.from_pretrained("preprocessor_config" )
snake_case__ : List[str] = processor(images=_SCREAMING_SNAKE_CASE , return_tensors="pt" )
# compare outputs from both models
snake_case__ : Dict = get_expected_output(_SCREAMING_SNAKE_CASE )
snake_case__ : List[str] = hf_model(inputs["pixel_values"] ).logits
assert hf_logits.shape == torch.Size([1, 1000] )
assert torch.allclose(hf_logits[0, 0:5] , _SCREAMING_SNAKE_CASE , atol=1E-3 )
Path(_SCREAMING_SNAKE_CASE ).mkdir(exist_ok=_SCREAMING_SNAKE_CASE )
print(F'''Saving model {swiftformer_name} to {pytorch_dump_folder_path}''' )
hf_model.save_pretrained(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
__lowerCamelCase : Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--swiftformer_name""",
default="""swiftformer_xs""",
choices=["""swiftformer_xs""", """swiftformer_s""", """swiftformer_l1""", """swiftformer_l3"""],
type=str,
help="""Name of the SwiftFormer model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""",
default="""./converted_outputs/""",
type=str,
help="""Path to the output PyTorch model directory.""",
)
parser.add_argument("""--original_ckpt""", default=None, type=str, help="""Path to the original model checkpoint.""")
__lowerCamelCase : Tuple = parser.parse_args()
convert_swiftformer_checkpoint(args.swiftformer_name, args.pytorch_dump_folder_path, args.original_ckpt)
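# Example invocation (script filename and checkpoint path are assumptions for
# illustration only):
#   python convert_swiftformer_original_to_hf.py --swiftformer_name swiftformer_xs \
#       --pytorch_dump_folder_path ./converted_outputs/ --original_ckpt ./swiftformer_xs.pth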
| 700 |
from __future__ import annotations
def prime_sieve ( limit : int ):
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True

    for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i

    primes = [2]
    for i in range(3 , limit , 2 ):
        if is_prime[i]:
            primes.append(i )
    return primes


def solution ( ceiling : int = 1000000 ):
    primes = prime_sieve(ceiling )
    length = 0
    largest = 0
    for i in range(len(primes ) ):
        for j in range(i + length , len(primes ) ):
            sol = sum(primes[i:j] )
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 25 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
"""configuration_whisper""": ["""WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """WhisperConfig""", """WhisperOnnxConfig"""],
"""feature_extraction_whisper""": ["""WhisperFeatureExtractor"""],
"""processing_whisper""": ["""WhisperProcessor"""],
"""tokenization_whisper""": ["""WhisperTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[int] = ["""WhisperTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_whisper"] = [
"""WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""WhisperForConditionalGeneration""",
"""WhisperModel""",
"""WhisperPreTrainedModel""",
"""WhisperForAudioClassification""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_whisper"] = [
"""TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFWhisperForConditionalGeneration""",
"""TFWhisperModel""",
"""TFWhisperPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_whisper"] = [
"""FlaxWhisperForConditionalGeneration""",
"""FlaxWhisperModel""",
"""FlaxWhisperPreTrainedModel""",
"""FlaxWhisperForAudioClassification""",
]
if TYPE_CHECKING:
from .configuration_whisper import WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP, WhisperConfig, WhisperOnnxConfig
from .feature_extraction_whisper import WhisperFeatureExtractor
from .processing_whisper import WhisperProcessor
from .tokenization_whisper import WhisperTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_whisper_fast import WhisperTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_whisper import (
WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
WhisperForAudioClassification,
WhisperForConditionalGeneration,
WhisperModel,
WhisperPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_whisper import (
TF_WHISPER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWhisperForConditionalGeneration,
TFWhisperModel,
TFWhisperPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_whisper import (
FlaxWhisperForAudioClassification,
FlaxWhisperForConditionalGeneration,
FlaxWhisperModel,
FlaxWhisperPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
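# Note: once the lazy module is registered in sys.modules, the heavy torch/TF/Flax
# submodules listed in _import_structure are only imported on first attribute access.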
| 701 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class DeformableDetrImageProcessingTester( unittest.TestCase ):
"""simple docstring"""
    def __init__( self : int , parent , batch_size=7 , num_channels=3 , min_resolution=3_0 , max_resolution=4_0_0 , do_resize=True , size=None , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_rescale=True , rescale_factor=1 / 2_5_5 , do_pad=True , ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad
    def prepare_image_processor_dict( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
    def get_expected_values( self : Optional[Any] , image_inputs , batched=False ):
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w )
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h )
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class DeformableDetrImageProcessingTest( ImageProcessingSavingTestMixin , unittest.TestCase ):
    """simple docstring"""

    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None
    def setUp( self : str ):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self )

    @property
    def image_processor_dict( self : List[Any] ):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties( self : Tuple ):
        image_processing = self.image_processing_class(**self.image_processor_dict )
        self.assertTrue(hasattr(image_processing , "image_mean" ) )
        self.assertTrue(hasattr(image_processing , "image_std" ) )
        self.assertTrue(hasattr(image_processing , "do_normalize" ) )
        self.assertTrue(hasattr(image_processing , "do_resize" ) )
        self.assertTrue(hasattr(image_processing , "do_rescale" ) )
        self.assertTrue(hasattr(image_processing , "do_pad" ) )
        self.assertTrue(hasattr(image_processing , "size" ) )
    def test_image_processor_from_dict_with_kwargs( self : Any ):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict )
        self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
        self.assertEqual(image_processor.do_pad , True )

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=False )
        self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
        self.assertEqual(image_processor.do_pad , False )
    def test_batch_feature( self : str ):
        pass
    def test_call_pil( self : List[str] ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False )
        for image in image_inputs:
            self.assertIsInstance(image , Image.Image )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_numpy( self : int ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , numpify=True )
        for image in image_inputs:
            self.assertIsInstance(image , np.ndarray )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    def test_call_pytorch( self : Union[str, Any] ):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict )
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester , equal_resolution=False , torchify=True )
        for image in image_inputs:
            self.assertIsInstance(image , torch.Tensor )
        # Test not batched input
        encoded_images = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs )
        self.assertEqual(
            encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
        # Test batched
        encoded_images = image_processing(image_inputs , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs , batched=True )
        self.assertEqual(
            encoded_images.shape , (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ) , )
    @slow
    def test_call_pytorch_with_coco_detection_annotations( self : Optional[Any] ):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"image_id": 3_9_7_6_9, "annotations": target}
        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image , annotations=target , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
    @slow
    def test_call_pytorch_with_coco_panoptic_annotations( self : Optional[int] ):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
            target = json.loads(f.read() )
        target = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic" )
        encoding = image_processing(images=image , annotations=target , masks_path=masks_path , return_tensors="pt" )
        # verify pixel values
        expected_shape = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
        self.assertEqual(encoding["pixel_values"].shape , expected_shape )
        expected_slice = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , expected_slice , atol=1e-4 ) )
        # verify area
        expected_area = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , expected_area ) )
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4] )
        self.assertEqual(encoding["labels"][0]["boxes"].shape , expected_boxes_shape )
        expected_boxes_slice = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , expected_boxes_slice , atol=1e-3 ) )
        # verify image_id
        expected_image_id = torch.tensor([3_9_7_6_9] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , expected_image_id ) )
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , expected_is_crowd ) )
        # verify class_labels
        expected_class_labels = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , expected_class_labels ) )
        # verify masks
        expected_masks_sum = 8_2_2_8_7_3
        self.assertEqual(encoding["labels"][0]["masks"].sum().item() , expected_masks_sum )
        # verify orig_size
        expected_orig_size = torch.tensor([4_8_0, 6_4_0] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , expected_orig_size ) )
        # verify size
        expected_size = torch.tensor([8_0_0, 1_0_6_6] )
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , expected_size ) )
| 25 | 0 |
from google.protobuf import descriptor as _descriptor
from google.protobuf import descriptor_pool as _descriptor_pool
from google.protobuf import symbol_database as _symbol_database
from google.protobuf.internal import builder as _builder
# @@protoc_insertion_point(imports)
__lowerCamelCase : List[Any] = _symbol_database.Default()
DESCRIPTOR = _descriptor_pool.Default().AddSerializedFile(
b"""\n\x19sentencepiece_model.proto\x12\rsentencepiece\"\x80\x0c\n\x0bTrainerSpec\x12\r\n\x05input\x18\x01 \x03(\t\x12\x14\n\x0cinput_format\x18\x07 \x01(\t\x12\x14\n\x0cmodel_prefix\x18\x02 \x01(\t\x12\x41\n\nmodel_type\x18\x03 \x01(\x0e\x32$.sentencepiece.TrainerSpec.ModelType:\x07UNIGRAM\x12\x18\n\nvocab_size\x18\x04 \x01(\x05:\x04\x38\x30\x30\x30\x12\x17\n\x0f\x61\x63\x63\x65pt_language\x18\x05 \x03(\t\x12 \n\x15self_test_sample_size\x18\x06 \x01(\x05:\x01\x30\x12*\n\x1b\x65nable_differential_privacy\x18\x32 \x01(\x08:\x05\x66\x61lse\x12+\n differential_privacy_noise_level\x18\x33 \x01(\x02:\x01\x30\x12\x32\n\'differential_privacy_clipping_threshold\x18\x34 \x01(\x04:\x01\x30\x12\"\n\x12\x63haracter_coverage\x18\n \x01(\x02:\x06\x30.9995\x12\x1e\n\x13input_sentence_size\x18\x0b \x01(\x04:\x01\x30\x12$\n\x16shuffle_input_sentence\x18\x13 \x01(\x08:\x04true\x12 \n\x14mining_sentence_size\x18\x0c \x01(\x05\x42\x02\x18\x01\x12\"\n\x16training_sentence_size\x18\r \x01(\x05\x42\x02\x18\x01\x12(\n\x17seed_sentencepiece_size\x18\x0e \x01(\x05:\x07\x31\x30\x30\x30\x30\x30\x30\x12\x1e\n\x10shrinking_factor\x18\x0f \x01(\x02:\x04\x30.75\x12!\n\x13max_sentence_length\x18\x12 \x01(\x05:\x04\x34\x31\x39\x32\x12\x17\n\x0bnum_threads\x18\x10 \x01(\x05:\x02\x31\x36\x12\x1d\n\x12num_sub_iterations\x18\x11 \x01(\x05:\x01\x32\x12$\n\x18max_sentencepiece_length\x18\x14 \x01(\x05:\x02\x31\x36\x12%\n\x17split_by_unicode_script\x18\x15 \x01(\x08:\x04true\x12\x1d\n\x0fsplit_by_number\x18\x17 \x01(\x08:\x04true\x12!\n\x13split_by_whitespace\x18\x16 \x01(\x08:\x04true\x12)\n\x1atreat_whitespace_as_suffix\x18\x18 \x01(\x08:\x05\x66\x61lse\x12+\n\x1c\x61llow_whitespace_only_pieces\x18\x1a \x01(\x08:\x05\x66\x61lse\x12\x1b\n\x0csplit_digits\x18\x19 \x01(\x08:\x05\x66\x61lse\x12#\n\x19pretokenization_delimiter\x18\x35 \x01(\t:\x00\x12\x17\n\x0f\x63ontrol_symbols\x18\x1e \x03(\t\x12\x1c\n\x14user_defined_symbols\x18\x1f \x03(\t\x12\x16\n\x0erequired_chars\x18$ \x01(\t\x12\x1c\n\rbyte_fallback\x18# \x01(\x08:\x05\x66\x61lse\x12+\n\x1dvocabulary_output_piece_score\x18 \x01(\x08:\x04true\x12\x1e\n\x10hard_vocab_limit\x18! \x01(\x08:\x04true\x12\x1c\n\ruse_all_vocab\x18\" \x01(\x08:\x05\x66\x61lse\x12\x11\n\x06unk_id\x18( \x01(\x05:\x01\x30\x12\x11\n\x06\x62os_id\x18) \x01(\x05:\x01\x31\x12\x11\n\x06\x65os_id\x18* \x01(\x05:\x01\x32\x12\x12\n\x06pad_id\x18+ \x01(\x05:\x02-1\x12\x18\n\tunk_piece\x18- \x01(\t:\x05<unk>\x12\x16\n\tbos_piece\x18. 
\x01(\t:\x03<s>\x12\x17\n\teos_piece\x18/ \x01(\t:\x04</s>\x12\x18\n\tpad_piece\x18\x30 \x01(\t:\x05<pad>\x12\x1a\n\x0bunk_surface\x18, \x01(\t:\x05 \xe2\x81\x87 \x12+\n\x1ctrain_extremely_large_corpus\x18\x31 \x01(\x08:\x05\x66\x61lse\"5\n\tModelType\x12\x0b\n\x07UNIGRAM\x10\x01\x12\x07\n\x03\x42PE\x10\x02\x12\x08\n\x04WORD\x10\x03\x12\x08\n\x04\x43HAR\x10\x04*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xd1\x01\n\x0eNormalizerSpec\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\x1c\n\x14precompiled_charsmap\x18\x02 \x01(\x0c\x12\x1e\n\x10\x61\x64\x64_dummy_prefix\x18\x03 \x01(\x08:\x04true\x12&\n\x18remove_extra_whitespaces\x18\x04 \x01(\x08:\x04true\x12 \n\x12\x65scape_whitespaces\x18\x05 \x01(\x08:\x04true\x12\x1e\n\x16normalization_rule_tsv\x18\x06 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"y\n\x0cSelfTestData\x12\x33\n\x07samples\x18\x01 \x03(\x0b\x32\".sentencepiece.SelfTestData.Sample\x1a)\n\x06Sample\x12\r\n\x05input\x18\x01 \x01(\t\x12\x10\n\x08\x65xpected\x18\x02 \x01(\t*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\"\xfe\x03\n\nModelProto\x12\x37\n\x06pieces\x18\x01 \x03(\x0b\x32\'.sentencepiece.ModelProto.SentencePiece\x12\x30\n\x0ctrainer_spec\x18\x02 \x01(\x0b\x32\x1a.sentencepiece.TrainerSpec\x12\x36\n\x0fnormalizer_spec\x18\x03 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x12\x33\n\x0eself_test_data\x18\x04 \x01(\x0b\x32\x1b.sentencepiece.SelfTestData\x12\x38\n\x11\x64\x65normalizer_spec\x18\x05 \x01(\x0b\x32\x1d.sentencepiece.NormalizerSpec\x1a\xd2\x01\n\rSentencePiece\x12\r\n\x05piece\x18\x01 \x01(\t\x12\r\n\x05score\x18\x02 \x01(\x02\x12\x42\n\x04type\x18\x03 \x01(\x0e\x32,.sentencepiece.ModelProto.SentencePiece.Type:\x06NORMAL\"T\n\x04Type\x12\n\n\x06NORMAL\x10\x01\x12\x0b\n\x07UNKNOWN\x10\x02\x12\x0b\n\x07\x43ONTROL\x10\x03\x12\x10\n\x0cUSER_DEFINED\x10\x04\x12\x08\n\x04\x42YTE\x10\x06\x12\n\n\x06UNUSED\x10\x05*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02*\t\x08\xc8\x01\x10\x80\x80\x80\x80\x02\x42\x02H\x03"""
)
_globals = globals()
_builder.BuildMessageAndEnumDescriptors(DESCRIPTOR, _globals)
_builder.BuildTopDescriptorsAndMessages(DESCRIPTOR, """sentencepiece_model_pb2""", _globals)
if _descriptor._USE_C_DESCRIPTORS is False:
    DESCRIPTOR._options = None
    DESCRIPTOR._serialized_options = b'H\003'
# (generated by protobuf compiler, but `_TRAINERSPEC` is not defined)
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["mining_sentence_size"]._serialized_options = b"\030\001"
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._options = None
# _TRAINERSPEC.fields_by_name["training_sentence_size"]._serialized_options = b"\030\001"
    _globals["_TRAINERSPEC"]._serialized_start = 45
    _globals["_TRAINERSPEC"]._serialized_end = 1581
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_start = 1517
    _globals["_TRAINERSPEC_MODELTYPE"]._serialized_end = 1570
    _globals["_NORMALIZERSPEC"]._serialized_start = 1584
    _globals["_NORMALIZERSPEC"]._serialized_end = 1793
    _globals["_SELFTESTDATA"]._serialized_start = 1795
    _globals["_SELFTESTDATA"]._serialized_end = 1916
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_start = 1864
    _globals["_SELFTESTDATA_SAMPLE"]._serialized_end = 1905
    _globals["_MODELPROTO"]._serialized_start = 1919
    _globals["_MODELPROTO"]._serialized_end = 2429
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_start = 2208
    _globals["_MODELPROTO_SENTENCEPIECE"]._serialized_end = 2418
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_start = 2323
    _globals["_MODELPROTO_SENTENCEPIECE_TYPE"]._serialized_end = 2407
# @@protoc_insertion_point(module_scope)
| 702 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode ( ):
    bs = (
        list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs ( word ):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
class LEDTokenizer( PreTrainedTokenizer ):
    """simple docstring"""

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__( self : List[str] , vocab_file , merges_file , errors="replace" , bos_token="<s>" , eos_token="</s>" , sep_token="</s>" , cls_token="<s>" , unk_token="<unk>" , pad_token="<pad>" , mask_token="<mask>" , add_prefix_space=False , **kwargs , ):
        bos_token = AddedToken(bos_token , lstrip=False , rstrip=False ) if isinstance(bos_token , str ) else bos_token
        eos_token = AddedToken(eos_token , lstrip=False , rstrip=False ) if isinstance(eos_token , str ) else eos_token
        sep_token = AddedToken(sep_token , lstrip=False , rstrip=False ) if isinstance(sep_token , str ) else sep_token
        cls_token = AddedToken(cls_token , lstrip=False , rstrip=False ) if isinstance(cls_token , str ) else cls_token
        unk_token = AddedToken(unk_token , lstrip=False , rstrip=False ) if isinstance(unk_token , str ) else unk_token
        pad_token = AddedToken(pad_token , lstrip=False , rstrip=False ) if isinstance(pad_token , str ) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token , lstrip=True , rstrip=False ) if isinstance(mask_token , str ) else mask_token
        super().__init__(
            errors=errors , bos_token=bos_token , eos_token=eos_token , unk_token=unk_token , sep_token=sep_token , cls_token=cls_token , pad_token=pad_token , mask_token=mask_token , add_prefix_space=add_prefix_space , **kwargs , )
        with open(vocab_file , encoding="utf-8" ) as vocab_handle:
            self.encoder = json.load(vocab_handle )
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file , encoding="utf-8" ) as merges_handle:
            bpe_merges = merges_handle.read().split("\n" )[1:-1]
        bpe_merges = [tuple(merge.split() ) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges , range(len(bpe_merges ) ) ) )
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size( self : List[Any] ):
        return len(self.encoder )

    def get_vocab( self : Any ):
        return dict(self.encoder , **self.added_tokens_encoder )
    def bpe( self : Optional[Any] , token ):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token )
        pairs = get_pairs(word )
        if not pairs:
            return token

        while True:
            bigram = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("inf" ) ) )
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word ):
                try:
                    j = word.index(first , i )
                except ValueError:
                    new_word.extend(word[i:] )
                    break
                else:
                    new_word.extend(word[i:j] )
                    i = j

                if word[i] == first and i < len(word ) - 1 and word[i + 1] == second:
                    new_word.append(first + second )
                    i += 2
                else:
                    new_word.append(word[i] )
                    i += 1
            new_word = tuple(new_word )
            word = new_word
            if len(word ) == 1:
                break
            else:
                pairs = get_pairs(word )
        word = " ".join(word )
        self.cache[token] = word
        return word
    def _tokenize( self : Optional[Any] , text ):
        bpe_tokens = []
        for token in re.findall(self.pat , text ):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8" ) )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token ).split(" " ) )
        return bpe_tokens

    def _convert_token_to_id( self : Union[str, Any] , token ):
        return self.encoder.get(token , self.encoder.get(self.unk_token ) )

    def _convert_id_to_token( self : Optional[int] , index ):
        return self.decoder.get(index )

    def convert_tokens_to_string( self : Union[str, Any] , tokens ):
        text = "".join(tokens )
        text = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
        return text
    def save_vocabulary( self : Optional[int] , save_directory : str , filename_prefix : Optional[str] = None ):
        if not os.path.isdir(save_directory ):
            logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
            return
        vocab_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
        merge_file = os.path.join(
            save_directory , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
        with open(vocab_file , "w" , encoding="utf-8" ) as f:
            f.write(json.dumps(self.encoder , indent=2 , sort_keys=True , ensure_ascii=False ) + "\n" )
        index = 0
        with open(merge_file , "w" , encoding="utf-8" ) as writer:
            writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
                if index != token_index:
                    logger.warning(
                        f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
                        " Please check that the tokenizer is not corrupted!" )
                    index = token_index
                writer.write(" ".join(bpe_tokens ) + "\n" )
                index += 1
        return vocab_file, merge_file
    def build_inputs_with_special_tokens( self : int , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask( self : Optional[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None , already_has_special_tokens : bool = False ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0 , token_ids_1=token_ids_1 , already_has_special_tokens=True )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0 )) + [1]
        return [1] + ([0] * len(token_ids_0 )) + [1, 1] + ([0] * len(token_ids_1 )) + [1]

    def create_token_type_ids_from_sequences( self : List[Any] , token_ids_0 : List[int] , token_ids_1 : Optional[List[int]] = None ):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep ) * [0]
    def prepare_for_tokenization( self : Optional[Any] , text , is_split_into_words=False , **kwargs ):
        add_prefix_space = kwargs.pop("add_prefix_space" , self.add_prefix_space )
        if (is_split_into_words or add_prefix_space) and (len(text ) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)
    def _pad( self : Any , encoded_inputs : Union[Dict[str, EncodedInput], BatchEncoding] , max_length : Optional[int] = None , padding_strategy : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , pad_to_multiple_of : Optional[int] = None , return_attention_mask : Optional[bool] = None , ):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs , max_length=max_length , padding_strategy=padding_strategy , pad_to_multiple_of=pad_to_multiple_of , return_attention_mask=return_attention_mask , )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"] ) != len(required_input )
            if needs_to_be_padded:
                difference = len(required_input ) - len(encoded_inputs["global_attention_mask"] )
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
        return encoded_inputs
| 25 | 0 |
from __future__ import annotations
from collections.abc import Iterator
from typing import Any
class Node :
    """simple docstring"""

    def __init__( self : Tuple , data : Any ):
        self.data = data
        self.next : Node | None = None


class CircularLinkedList :
    """simple docstring"""

    def __init__( self : Tuple ):
        self.head = None
        self.tail = None

    def __iter__( self : List[Any] ):
        node = self.head
        while self.head:
            yield node.data
            node = node.next
            if node == self.head:
                break

    def __len__( self : Union[str, Any] ):
        return sum(1 for _ in self )

    def __repr__( self : Optional[int] ):
        return "->".join(str(item ) for item in iter(self ) )

    def insert_tail( self : List[Any] , data : Any ):
        self.insert_nth(len(self ) , data )

    def insert_head( self : Union[str, Any] , data : Any ):
        self.insert_nth(0 , data )

    def insert_nth( self : List[Any] , index : int , data : Any ):
        if index < 0 or index > len(self ):
            raise IndexError("list index out of range." )
        new_node = Node(data )
        if self.head is None:
            new_node.next = new_node  # first node points itself
            self.tail = self.head = new_node
        elif index == 0:  # insert at head
            new_node.next = self.head
            self.head = self.tail.next = new_node
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            new_node.next = temp.next
            temp.next = new_node
            if index == len(self ) - 1:  # insert at tail
                self.tail = new_node

    def delete_front( self : Optional[Any] ):
        return self.delete_nth(0 )

    def delete_tail( self : Union[str, Any] ):
        return self.delete_nth(len(self ) - 1 )

    def delete_nth( self : Optional[int] , index : int = 0 ):
        if not 0 <= index < len(self ):
            raise IndexError("list index out of range." )
        delete_node = self.head
        if self.head == self.tail:  # just one node
            self.head = self.tail = None
        elif index == 0:  # delete head node
            self.tail.next = self.tail.next.next
            self.head = self.head.next
        else:
            temp = self.head
            for _ in range(index - 1 ):
                temp = temp.next
            delete_node = temp.next
            temp.next = temp.next.next
            if index == len(self ) - 1:  # delete at tail
                self.tail = temp
        return delete_node.data

    def is_empty( self : Dict ):
        return len(self ) == 0
def test_circular_linked_list ( ):
    circular_linked_list = CircularLinkedList()
    assert len(circular_linked_list ) == 0
    assert circular_linked_list.is_empty() is True
    assert str(circular_linked_list ) == ""

    try:
        circular_linked_list.delete_front()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_tail()
        raise AssertionError  # This should not happen
    except IndexError:
        assert True  # This should happen

    try:
        circular_linked_list.delete_nth(-1 )
        raise AssertionError
    except IndexError:
        assert True

    try:
        circular_linked_list.delete_nth(0 )
        raise AssertionError
    except IndexError:
        assert True

    assert circular_linked_list.is_empty() is True
    for i in range(5 ):
        assert len(circular_linked_list ) == i
        circular_linked_list.insert_nth(i , i + 1 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )

    circular_linked_list.insert_tail(6 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 7 ) )
    circular_linked_list.insert_head(0 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(0 , 7 ) )

    assert circular_linked_list.delete_front() == 0
    assert circular_linked_list.delete_tail() == 6
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )
    assert circular_linked_list.delete_nth(2 ) == 3

    circular_linked_list.insert_nth(2 , 3 )
    assert str(circular_linked_list ) == "->".join(str(i ) for i in range(1 , 6 ) )

    assert circular_linked_list.is_empty() is False
if __name__ == "__main__":
import doctest
doctest.testmod()
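    # Also run the exercise above as an end-to-end smoke test of the list operations
    test_circular_linked_list()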
| 703 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def pytest_addoption ( parser ):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser )
def pytest_terminal_summary ( terminalreporter ):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports" )
    if make_reports:
        pytest_terminal_summary_main(terminalreporter , id=make_reports )
| 25 | 0 |
# DISCLAIMER: This file is strongly influenced by https://github.com/yang-song/score_sde_pytorch
import math
from typing import Union
import torch
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import randn_tensor
from .scheduling_utils import SchedulerMixin
class ScoreSdeVpScheduler( SchedulerMixin , ConfigMixin ):
    """simple docstring"""

    order = 1

    @register_to_config
    def __init__( self : str , num_train_timesteps=2_0_0_0 , beta_min=0.1 , beta_max=2_0 , sampling_eps=1e-3 ):
        self.sigmas = None
        self.discrete_sigmas = None
        self.timesteps = None

    def set_timesteps( self : Optional[Any] , num_inference_steps , device : Union[str, torch.device] = None ):
        self.timesteps = torch.linspace(1 , self.config.sampling_eps , num_inference_steps , device=device )
    def step_pred( self : Any , score , x , t , generator=None ):
        if self.timesteps is None:
            raise ValueError(
                "`self.timesteps` is not set, you need to run 'set_timesteps' after creating the scheduler" )
        # TODO(Patrick) better comments + non-PyTorch
        # postprocess model score
        log_mean_coeff = (
            -0.2_5 * t**2 * (self.config.beta_max - self.config.beta_min) - 0.5 * t * self.config.beta_min
        )
        std = torch.sqrt(1.0 - torch.exp(2.0 * log_mean_coeff ) )
        std = std.flatten()
        while len(std.shape ) < len(score.shape ):
            std = std.unsqueeze(-1 )
        score = -score / std
        # compute
        dt = -1.0 / len(self.timesteps )
        beta_t = self.config.beta_min + t * (self.config.beta_max - self.config.beta_min)
        beta_t = beta_t.flatten()
        while len(beta_t.shape ) < len(x.shape ):
            beta_t = beta_t.unsqueeze(-1 )
        drift = -0.5 * beta_t * x
        diffusion = torch.sqrt(beta_t )
        drift = drift - diffusion**2 * score
        x_mean = x + drift * dt
        # add noise
        noise = randn_tensor(x.shape , layout=x.layout , generator=generator , device=x.device , dtype=x.dtype )
        x = x_mean + diffusion * math.sqrt(-dt ) * noise
        return x, x_mean
def __len__( self : Tuple ):
return self.config.num_train_timesteps
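# A minimal, hedged usage sketch of the variance-preserving SDE sampling loop above
# (shapes and the score are placeholders assumed for illustration only):
#   sched = ScoreSdeVpScheduler()
#   sched.set_timesteps(num_inference_steps=10)
#   x = torch.randn(1, 3, 8, 8)
#   for t in sched.timesteps:
#       score = -x  # stand-in for a trained score network's output
#       x, x_mean = sched.step_pred(score, x, t, generator=torch.Generator().manual_seed(0))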
| 704 |
def prefix_function ( input_string : str ):
    prefix_result = [0] * len(input_string )
    for i in range(1 , len(input_string ) ):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix ( input_string : str ):
    return max(prefix_function(input_string ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
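    # Hedged usage sketch (input assumed for illustration): KMP prefix function values
    print(prefix_function("aabcdaabc"))  # expected: [0, 1, 0, 0, 0, 1, 2, 3, 4]
    print(longest_prefix("aabcdaabc"))  # expected: 4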
| 25 | 0 |
import unittest
from transformers import CamembertTokenizer, CamembertTokenizerFast
from transformers.testing_utils import get_tests_dir, require_sentencepiece, require_tokenizers, slow
from transformers.utils import is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
SAMPLE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece.model""")
SAMPLE_BPE_VOCAB = get_tests_dir("""fixtures/test_sentencepiece_bpe.model""")

FRAMEWORK = """pt""" if is_torch_available() else """tf"""
@require_sentencepiece
@require_tokenizers
class CamembertTokenizationTest( TokenizerTesterMixin , unittest.TestCase ):
    """simple docstring"""

    tokenizer_class = CamembertTokenizer
    rust_tokenizer_class = CamembertTokenizerFast
    test_rust_tokenizer = True
    test_sentencepiece = True
    def setUp( self : Optional[Any] ):
        super().setUp()

        # We have a SentencePiece fixture for testing
        tokenizer = CamembertTokenizer(SAMPLE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
    def test_convert_token_and_id( self : Dict ):
        token = "<pad>"
        token_id = 1

        self.assertEqual(self.get_tokenizer()._convert_token_to_id(token ) , token_id )
        self.assertEqual(self.get_tokenizer()._convert_id_to_token(token_id ) , token )
    def test_get_vocab( self : Any ):
        vocab_keys = list(self.get_tokenizer().get_vocab().keys() )

        self.assertEqual(vocab_keys[0] , "<s>NOTUSED" )
        self.assertEqual(vocab_keys[1] , "<pad>" )
        self.assertEqual(vocab_keys[-1] , "<mask>" )
        self.assertEqual(len(vocab_keys ) , 1_0_0_4 )
    def test_vocab_size( self : Optional[Any] ):
        self.assertEqual(self.get_tokenizer().vocab_size , 1_0_0_5 )
    def test_rust_and_python_bpe_tokenizers( self : List[str] ):
        tokenizer = CamembertTokenizer(SAMPLE_BPE_VOCAB )
        tokenizer.save_pretrained(self.tmpdirname )
        rust_tokenizer = CamembertTokenizerFast.from_pretrained(self.tmpdirname )

        sequence = "I was born in 92000, and this is falsé."

        ids = tokenizer.encode(sequence )
        rust_ids = rust_tokenizer.encode(sequence )
        self.assertListEqual(ids , rust_ids )

        ids = tokenizer.encode(sequence , add_special_tokens=False )
        rust_ids = rust_tokenizer.encode(sequence , add_special_tokens=False )
        self.assertListEqual(ids , rust_ids )

        # <unk> tokens are not the same for `rust` than for `slow`.
        # Because spm gives back raw token instead of `unk` in EncodeAsPieces
        # tokens = tokenizer.tokenize(sequence)
        tokens = tokenizer.convert_ids_to_tokens(ids )
        rust_tokens = rust_tokenizer.tokenize(sequence )
        self.assertListEqual(tokens , rust_tokens )
def _lowercase ( self : Union[str, Any] ):
if not self.test_rust_tokenizer:
return
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : Dict = self.get_rust_tokenizer()
snake_case__ : Union[str, Any] = "I was born in 92000, and this is falsรฉ."
snake_case__ : List[Any] = tokenizer.tokenize(__A )
snake_case__ : List[Any] = rust_tokenizer.tokenize(__A )
self.assertListEqual(__A , __A )
snake_case__ : Union[str, Any] = tokenizer.encode(__A , add_special_tokens=__A )
snake_case__ : Union[str, Any] = rust_tokenizer.encode(__A , add_special_tokens=__A )
self.assertListEqual(__A , __A )
snake_case__ : int = self.get_rust_tokenizer()
snake_case__ : List[str] = tokenizer.encode(__A )
snake_case__ : Any = rust_tokenizer.encode(__A )
self.assertListEqual(__A , __A )
@slow
def _lowercase ( self : int ):
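        # fmt: off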
snake_case__ : Tuple = {"input_ids": [[5, 5_4, 7_1_9_6, 2_9_7, 3_0, 2_3, 7_7_6, 1_8, 1_1, 3_2_1_5, 3_7_0_5, 8_2_5_2, 2_2, 3_1_6_4, 1_1_8_1, 2_1_1_6, 2_9, 1_6, 8_1_3, 2_5, 7_9_1, 3_3_1_4, 2_0, 3_4_4_6, 3_8, 2_7_5_7_5, 1_2_0, 6, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [5, 4_6_8, 1_7, 1_1, 9_0_8_8, 2_0, 1_5_1_7, 8, 2_2_8_0_4, 1_8_8_1_8, 1_0, 3_8, 6_2_9, 6_0_7, 6_0_7, 1_4_2, 1_9, 7_1_9_6, 8_6_7, 5_6, 1_0_3_2_6, 2_4, 2_2_6_7, 2_0, 4_1_6, 5_0_7_2, 1_5_6_1_2, 2_3_3, 7_3_4, 7, 2_3_9_9, 2_7, 1_6, 3_0_1_5, 1_6_4_9, 7, 2_4, 2_0, 4_3_3_8, 2_3_9_9, 2_7, 1_3, 3_4_0_0, 1_4, 1_3, 6_1_8_9, 8, 9_3_0, 9, 6]], "attention_mask": [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]]} # noqa: E501
# fmt: on
# camembert is a french model. So we also use french texts.
        snake_case__ : List[Any] = [
            "Le transformeur est un modèle d\'apprentissage profond introduit en 2017, "
            "utilisé principalement dans le domaine du traitement automatique des langues (TAL).",
            "À l\'instar des réseaux de neurones récurrents (RNN), les transformeurs sont conçus "
            "pour gérer des données séquentielles, telles que le langage naturel, pour des tâches "
            "telles que la traduction et la synthèse de texte.",
        ]
self.tokenizer_integration_test_util(
expected_encoding=__A , model_name="camembert-base" , revision="3a0641d9a1aeb7e848a74299e7e4c4bca216b4cf" , sequences=__A , )
| 705 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
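        # stack into a single jax.Array only when every element is already a jax.Array
        # with identical shape and dtype; otherwise return the column unchanged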
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def _lowercase ( self : int , __A : Tuple ):
import jax
import jax.numpy as jnp
        if isinstance(__A , (str, bytes, type(None )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ : Optional[int] = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                snake_case__ : Any = {"dtype": jnp.int64}
            else:
                snake_case__ : Tuple = {"dtype": jnp.int32}
        elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            snake_case__ : str = {"dtype": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case__ : Optional[Any] = np.asarray(__A )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
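# A minimal usage sketch (illustrative): this formatter is what backs
# `Dataset.with_format("jax")` in the `datasets` library.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#   batch = ds[:2]  # {"x": jax.Array of shape (2, 2)}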
| 25 | 0 |
import argparse
import collections
import numpy as np
import torch
from flax import traverse_util
from tax import checkpoints
from transformers import MTaConfig, UMTaEncoderModel, UMTaForConditionalGeneration
from transformers.utils import logging
logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Any , snake_case_ : str ):
return params[F'''{prefix}/{prefix}/relpos_bias/rel_embedding'''][:, i, :]
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Optional[int] , snake_case_ : Dict="attention" ):
snake_case__ : List[Any] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/key/kernel'''][:, i, :, :] )
snake_case__ : Tuple = k_tmp.reshape(k_tmp.shape[0] , k_tmp.shape[1] * k_tmp.shape[2] )
snake_case__ : Optional[Any] = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/out/kernel'''][:, i, :, :] )
snake_case__ : Dict = o_tmp.reshape(o_tmp.shape[0] * o_tmp.shape[1] , o_tmp.shape[2] )
snake_case__ : str = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/query/kernel'''][:, i, :, :] )
snake_case__ : int = q_tmp.reshape(q_tmp.shape[0] , q_tmp.shape[1] * q_tmp.shape[2] )
snake_case__ : Dict = np.ascontiguousarray(params[F'''{prefix}/{prefix}/{layer_name}/value/kernel'''][:, i, :, :] )
snake_case__ : Optional[Any] = v_tmp.reshape(v_tmp.shape[0] , v_tmp.shape[1] * v_tmp.shape[2] )
return k, o, q, v
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Any , snake_case_ : Optional[Any]=False ):
if split_mlp_wi:
snake_case__ : Dict = params[F'''{prefix}/{prefix}/mlp/wi_0/kernel'''][:, i, :]
snake_case__ : Optional[Any] = params[F'''{prefix}/{prefix}/mlp/wi_1/kernel'''][:, i, :]
snake_case__ : Union[str, Any] = (wi_a, wi_a)
else:
snake_case__ : Optional[Any] = params[F'''{prefix}/{prefix}/mlp/wi/kernel'''][:, i, :]
snake_case__ : int = params[F'''{prefix}/{prefix}/mlp/wo/kernel'''][:, i, :]
return wi, wo
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : str ):
return params[F'''{prefix}/{prefix}/{layer_name}/scale'''][:, i]
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , *, snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : int = False ):
snake_case__ : int = traverse_util.flatten_dict(variables["target"] )
snake_case__ : str = {"/".join(lowercase_ ): v for k, v in old.items()}
# v1.1 models have a gated GeLU with wi_0 and wi_1 instead of wi
snake_case__ : Union[str, Any] = "encoder/encoder/mlp/wi_0/kernel" in old
print("Split MLP:" , lowercase_ )
snake_case__ : List[Any] = collections.OrderedDict()
# Shared embeddings.
snake_case__ : str = old["token_embedder/embedding"]
# Encoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
snake_case__ : int = tax_layer_norm_lookup(lowercase_ , lowercase_ , "encoder" , "pre_attention_layer_norm" )
snake_case__ : str = tax_attention_lookup(lowercase_ , lowercase_ , "encoder" , "attention" )
snake_case__ : List[Any] = layer_norm
snake_case__ : str = k.T
snake_case__ : Optional[Any] = o.T
snake_case__ : List[str] = q.T
snake_case__ : int = v.T
# Block i, layer 1 (MLP).
snake_case__ : Dict = tax_layer_norm_lookup(lowercase_ , lowercase_ , "encoder" , "pre_mlp_layer_norm" )
snake_case__ : int = tax_mlp_lookup(lowercase_ , lowercase_ , "encoder" , lowercase_ )
snake_case__ : int = layer_norm
if split_mlp_wi:
snake_case__ : Optional[int] = wi[0].T
snake_case__ : Optional[Any] = wi[1].T
else:
snake_case__ : Any = wi.T
snake_case__ : Optional[Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case__ : Optional[int] = tax_relpos_bias_lookup(
lowercase_ , lowercase_ , "encoder" ).T
snake_case__ : str = old["encoder/encoder_norm/scale"]
if not scalable_attention:
snake_case__ : Any = tax_relpos_bias_lookup(
lowercase_ , 0 , "encoder" ).T
snake_case__ : str = tax_relpos_bias_lookup(
lowercase_ , 0 , "decoder" ).T
if not is_encoder_only:
# Decoder.
for i in range(lowercase_ ):
# Block i, layer 0 (Self Attention).
snake_case__ : List[Any] = tax_layer_norm_lookup(lowercase_ , lowercase_ , "decoder" , "pre_self_attention_layer_norm" )
snake_case__ : Any = tax_attention_lookup(lowercase_ , lowercase_ , "decoder" , "self_attention" )
snake_case__ : List[str] = layer_norm
snake_case__ : Union[str, Any] = k.T
snake_case__ : str = o.T
snake_case__ : List[str] = q.T
snake_case__ : Union[str, Any] = v.T
# Block i, layer 1 (Cross Attention).
snake_case__ : Dict = tax_layer_norm_lookup(lowercase_ , lowercase_ , "decoder" , "pre_cross_attention_layer_norm" )
snake_case__ : int = tax_attention_lookup(lowercase_ , lowercase_ , "decoder" , "encoder_decoder_attention" )
snake_case__ : List[str] = layer_norm
snake_case__ : str = k.T
snake_case__ : List[Any] = o.T
snake_case__ : int = q.T
snake_case__ : List[Any] = v.T
# Block i, layer 2 (MLP).
snake_case__ : Tuple = tax_layer_norm_lookup(lowercase_ , lowercase_ , "decoder" , "pre_mlp_layer_norm" )
snake_case__ : Tuple = tax_mlp_lookup(lowercase_ , lowercase_ , "decoder" , lowercase_ )
snake_case__ : Optional[int] = layer_norm
if split_mlp_wi:
snake_case__ : List[Any] = wi[0].T
snake_case__ : Optional[int] = wi[1].T
else:
snake_case__ : str = wi.T
snake_case__ : Union[str, Any] = wo.T
if scalable_attention:
# convert the rel_embedding of each layer
snake_case__ : Optional[int] = tax_relpos_bias_lookup(lowercase_ , lowercase_ , "decoder" ).T
snake_case__ : Tuple = old["decoder/decoder_norm/scale"]
# LM Head (only in v1.1 checkpoints, in v1.0 embeddings are used instead)
if "decoder/logits_dense/kernel" in old:
snake_case__ : Any = old["decoder/logits_dense/kernel"].T
return new
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Dict ):
snake_case__ : Dict = collections.OrderedDict([(k, torch.from_numpy(v.copy() )) for (k, v) in converted_params.items()] )
# Add what is missing.
if "encoder.embed_tokens.weight" not in state_dict:
snake_case__ : str = state_dict["shared.weight"]
if not is_encoder_only:
if "decoder.embed_tokens.weight" not in state_dict:
snake_case__ : Optional[int] = state_dict["shared.weight"]
if "lm_head.weight" not in state_dict: # For old 1.0 models.
print("Using shared word embeddings as lm_head." )
snake_case__ : int = state_dict["shared.weight"]
return state_dict
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , snake_case_ : List[str] , snake_case_ : Union[str, Any] , snake_case_ : int ):
snake_case__ : int = checkpoints.load_tax_checkpoint(lowercase_ )
snake_case__ : List[str] = convert_tax_to_pytorch(
lowercase_ , num_layers=config.num_layers , is_encoder_only=lowercase_ , scalable_attention=lowercase_ )
snake_case__ : Union[str, Any] = make_state_dict(lowercase_ , lowercase_ )
model.load_state_dict(lowercase_ , strict=lowercase_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : List[str] , snake_case_ : int , snake_case_ : List[Any] = False , snake_case_ : Tuple = False , ):
snake_case__ : int = MTaConfig.from_json_file(lowercase_ )
print(F'''Building PyTorch model from configuration: {config}''' )
# Non-v1.1 checkpoints could also use T5Model, but this works for all.
# The v1.0 checkpoints will simply have an LM head that is the word embeddings.
if is_encoder_only:
snake_case__ : Optional[int] = UMTaEncoderModel(lowercase_ )
else:
snake_case__ : Optional[int] = UMTaForConditionalGeneration(lowercase_ )
# Load weights from tf checkpoint
load_tax_weights_in_ta(lowercase_ , lowercase_ , lowercase_ , lowercase_ , lowercase_ )
# Save pytorch-model
print(F'''Save PyTorch model to {pytorch_dump_path}''' )
model.save_pretrained(lowercase_ )
# Verify that we can load the checkpoint.
model.from_pretrained(lowercase_ )
print("Done" )
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser(description="""Converts a native T5X checkpoint into a PyTorch checkpoint.""")
# Required parameters
parser.add_argument(
"""--t5x_checkpoint_path""", default=None, type=str, required=True, help="""Path to the T5X checkpoint."""
)
parser.add_argument(
"""--config_file""",
default=None,
type=str,
required=True,
help="""The config json file corresponding to the pre-trained T5 model.\nThis specifies the model architecture.""",
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
parser.add_argument(
"""--is_encoder_only""", action="""store_true""", help="""Check if the model is encoder-decoder model""", default=False
)
parser.add_argument(
"""--scalable_attention""",
action="""store_true""",
help="""Whether the model uses scaled attention (umt5 model)""",
default=False,
)
__lowerCamelCase : List[str] = parser.parse_args()
convert_tax_checkpoint_to_pytorch(
        args.t5x_checkpoint_path,
args.config_file,
args.pytorch_dump_path,
args.is_encoder_only,
args.scalable_attention,
)
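# Example invocation (script name and paths are hypothetical; the flags match the
# parser above):
#
#   python convert_t5x_checkpoint_to_pytorch.py \
#       --t5x_checkpoint_path /path/to/t5x/checkpoint_1000000 \
#       --config_file /path/to/config.json \
#       --pytorch_dump_path /path/to/pytorch_model \
#       --scalable_attention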
| 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
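# With this structure, importing the package itself is cheap: the framework-specific
# modeling modules above are only imported when one of the exported names is first
# accessed through the `_LazyModule` proxy.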
| 25 | 0 |
import unittest
import numpy as np
from transformers import BertConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_flax_available():
from transformers.models.bert.modeling_flax_bert import (
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForNextSentencePrediction,
FlaxBertForPreTraining,
FlaxBertForQuestionAnswering,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertModel,
)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : str , __A : Optional[int] , __A : Optional[int]=1_3 , __A : Optional[Any]=7 , __A : List[str]=True , __A : int=True , __A : int=True , __A : Union[str, Any]=True , __A : Union[str, Any]=9_9 , __A : List[Any]=3_2 , __A : Optional[Any]=5 , __A : int=4 , __A : List[str]=3_7 , __A : Any="gelu" , __A : Tuple=0.1 , __A : Tuple=0.1 , __A : Union[str, Any]=5_1_2 , __A : Tuple=1_6 , __A : List[Any]=2 , __A : List[str]=0.0_2 , __A : List[str]=4 , ):
snake_case__ : Union[str, Any] = parent
snake_case__ : Any = batch_size
snake_case__ : Optional[int] = seq_length
snake_case__ : List[str] = is_training
snake_case__ : Optional[int] = use_attention_mask
snake_case__ : Dict = use_token_type_ids
snake_case__ : Optional[int] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Optional[int] = hidden_act
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : Dict = max_position_embeddings
snake_case__ : List[str] = type_vocab_size
snake_case__ : str = type_sequence_label_size
snake_case__ : List[str] = initializer_range
snake_case__ : Union[str, Any] = num_choices
def _lowercase ( self : Optional[int] ):
snake_case__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : List[Any] = None
if self.use_attention_mask:
snake_case__ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Optional[int] = None
if self.use_token_type_ids:
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Optional[int] = BertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=_snake_case , initializer_range=self.initializer_range , )
return config, input_ids, token_type_ids, attention_mask
def _lowercase ( self : int ):
snake_case__ : Any = self.prepare_config_and_inputs()
snake_case__, snake_case__, snake_case__, snake_case__ : Optional[Any] = config_and_inputs
snake_case__ : Tuple = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": attention_mask}
return config, inputs_dict
def _lowercase ( self : str ):
snake_case__ : str = self.prepare_config_and_inputs()
snake_case__, snake_case__, snake_case__, snake_case__ : List[str] = config_and_inputs
snake_case__ : Tuple = True
snake_case__ : List[str] = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
snake_case__ : Any = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
attention_mask,
encoder_hidden_states,
encoder_attention_mask,
)
@require_flax
class SCREAMING_SNAKE_CASE__ ( UpperCAmelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = True
a_ = (
(
FlaxBertModel,
FlaxBertForPreTraining,
FlaxBertForMaskedLM,
FlaxBertForMultipleChoice,
FlaxBertForQuestionAnswering,
FlaxBertForNextSentencePrediction,
FlaxBertForSequenceClassification,
FlaxBertForTokenClassification,
FlaxBertForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _lowercase ( self : Any ):
snake_case__ : Optional[int] = FlaxBertModelTester(self )
@slow
def _lowercase ( self : int ):
snake_case__ : List[Any] = FlaxBertModel.from_pretrained("bert-base-cased" )
snake_case__ : Optional[int] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_snake_case )
| 707 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Tuple = get_activation("gelu" )
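        # `gelu_python` is the exact erf-based GELU (the same function `get_activation("gelu")`
        # returns), while `gelu_new` is the tanh approximation, so the two differ slightly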
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def _lowercase ( self : Dict ):
snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation("gelu" )
snake_case__ : int = get_activation("gelu_10" )
snake_case__ : Optional[int] = torch_builtin(__A )
snake_case__ : Dict = geluaa(__A )
snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _lowercase ( self : str ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__A ):
get_activation("bogus" )
with self.assertRaises(__A ):
get_activation(__A )
def _lowercase ( self : List[str] ):
        acta = get_activation("gelu" )
        acta.a = 1
        actb = get_activation("gelu" )
        # `get_activation` returns a fresh instance on each call, so an attribute
        # set on one instance must not leak onto another
        self.assertEqual(acta.a , 1 )
        with self.assertRaises(AttributeError ):
            snake_case__ : int = actb.a
| 25 | 0 |
import argparse
import ast
import logging
import os
import sys
import pandas as pd
import torch
from tqdm import tqdm
from transformers import BartForConditionalGeneration, RagRetriever, RagSequenceForGeneration, RagTokenForGeneration
from transformers import logging as transformers_logging
sys.path.append(os.path.join(os.getcwd())) # noqa: E402 # isort:skip
from utils_rag import exact_match_score, f1_score # noqa: E402 # isort:skip
__lowerCamelCase : Optional[int] = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
transformers_logging.set_verbosity_info()
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
if "token" in model_name_or_path:
return "rag_token"
if "sequence" in model_name_or_path:
return "rag_sequence"
if "bart" in model_name_or_path:
return "bart"
return None
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : List[Any] ):
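    # SQuAD-style scoring: score the prediction against every acceptable gold answer
    # and keep the best score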
return max(metric_fn(snake_case_ , snake_case_ ) for gt in ground_truths )
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Tuple , snake_case_ : Any ):
snake_case__ : List[str] = [line.strip() for line in open(snake_case_ , "r" ).readlines()]
snake_case__ : Optional[Any] = []
if args.gold_data_mode == "qa":
snake_case__ : Optional[int] = pd.read_csv(snake_case_ , sep="\t" , header=snake_case_ )
for answer_list in data[1]:
snake_case__ : Union[str, Any] = ast.literal_eval(snake_case_ )
answers.append(snake_case_ )
else:
snake_case__ : Optional[Any] = [line.strip() for line in open(snake_case_ , "r" ).readlines()]
snake_case__ : Optional[int] = [[reference] for reference in references]
    fa = em = total = 0
for prediction, ground_truths in zip(snake_case_ , snake_case_ ):
total += 1
em += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
fa += metric_max_over_ground_truths(snake_case_ , snake_case_ , snake_case_ )
    em = 1_00.0 * em / total
    fa = 1_00.0 * fa / total
logger.info(F'''F1: {fa:.2f}''' )
logger.info(F'''EM: {em:.2f}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Dict , snake_case_ : int ):
snake_case__ : Any = args.k
snake_case__ : Tuple = [line.strip() for line in open(snake_case_ , "r" ).readlines()]
snake_case__ : int = [line.strip() for line in open(snake_case_ , "r" ).readlines()]
    em = total = 0
for hypo, reference in zip(snake_case_ , snake_case_ ):
snake_case__ : List[str] = set(hypo.split("\t" )[:k] )
snake_case__ : str = set(reference.split("\t" ) )
total += 1
em += len(hypo_provenance & ref_provenance ) / k
    em = 1_00.0 * em / total
logger.info(F'''Precision@{k}: {em: .2f}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : List[Any] , snake_case_ : Optional[int] ):
def strip_title(snake_case_ : Union[str, Any] ):
if title.startswith("\"" ):
snake_case__ : List[Any] = title[1:]
if title.endswith("\"" ):
snake_case__ : Union[str, Any] = title[:-1]
return title
snake_case__ : List[str] = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors="pt" , padding=snake_case_ , truncation=snake_case_ , )["input_ids"].to(args.device )
snake_case__ : int = rag_model.rag.question_encoder(snake_case_ )
snake_case__ : Optional[Any] = question_enc_outputs[0]
snake_case__ : Tuple = rag_model.retriever(
snake_case_ , question_enc_pool_output.cpu().detach().to(torch.floataa ).numpy() , prefix=rag_model.rag.generator.config.prefix , n_docs=rag_model.config.n_docs , return_tensors="pt" , )
snake_case__ : Tuple = rag_model.retriever.index.get_doc_dicts(result.doc_ids )
snake_case__ : Dict = []
for docs in all_docs:
snake_case__ : Optional[Any] = [strip_title(snake_case_ ) for title in docs["title"]]
provenance_strings.append("\t".join(snake_case_ ) )
return provenance_strings
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Optional[Any] ):
with torch.no_grad():
snake_case__ : Dict = rag_model.retriever.question_encoder_tokenizer.batch_encode_plus(
snake_case_ , return_tensors="pt" , padding=snake_case_ , truncation=snake_case_ )
snake_case__ : str = inputs_dict.input_ids.to(args.device )
snake_case__ : Any = inputs_dict.attention_mask.to(args.device )
snake_case__ : Any = rag_model.generate( # rag_model overwrites generate
snake_case_ , attention_mask=snake_case_ , num_beams=args.num_beams , min_length=args.min_length , max_length=args.max_length , early_stopping=snake_case_ , num_return_sequences=1 , bad_words_ids=[[0, 0]] , )
snake_case__ : List[str] = rag_model.retriever.generator_tokenizer.batch_decode(snake_case_ , skip_special_tokens=snake_case_ )
if args.print_predictions:
for q, a in zip(snake_case_ , snake_case_ ):
logger.info("Q: {} - A: {}".format(snake_case_ , snake_case_ ) )
return answers
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = argparse.ArgumentParser()
parser.add_argument(
"--model_type" , choices=["rag_sequence", "rag_token", "bart"] , type=snake_case_ , help=(
"RAG model type: rag_sequence, rag_token or bart, if none specified, the type is inferred from the"
" model_name_or_path"
) , )
parser.add_argument(
"--index_name" , default=snake_case_ , choices=["exact", "compressed", "legacy"] , type=snake_case_ , help="RAG model retriever type" , )
parser.add_argument(
"--index_path" , default=snake_case_ , type=snake_case_ , help="Path to the retrieval index" , )
parser.add_argument("--n_docs" , default=5 , type=snake_case_ , help="Number of retrieved docs" )
parser.add_argument(
"--model_name_or_path" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="Path to pretrained checkpoints or model identifier from huggingface.co/models" , )
parser.add_argument(
"--eval_mode" , choices=["e2e", "retrieval"] , default="e2e" , type=snake_case_ , help=(
"Evaluation mode, e2e calculates exact match and F1 of the downstream task, retrieval calculates"
" precision@k."
) , )
parser.add_argument("--k" , default=1 , type=snake_case_ , help="k for the precision@k calculation" )
parser.add_argument(
"--evaluation_set" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="Path to a file containing evaluation samples" , )
parser.add_argument(
"--gold_data_path" , default=snake_case_ , type=snake_case_ , required=snake_case_ , help="Path to a tab-separated file with gold samples" , )
parser.add_argument(
"--gold_data_mode" , default="qa" , type=snake_case_ , choices=["qa", "ans"] , help=(
"Format of the gold data file"
"qa - a single line in the following format: question [tab] answer_list"
"ans - a single line of the gold file contains the expected answer string"
) , )
parser.add_argument(
"--predictions_path" , type=snake_case_ , default="predictions.txt" , help="Name of the predictions file, to be stored in the checkpoints directory" , )
parser.add_argument(
"--eval_all_checkpoints" , action="store_true" , help="Evaluate all checkpoints starting with the same prefix as model_name ending and ending with step number" , )
parser.add_argument(
"--eval_batch_size" , default=8 , type=snake_case_ , help="Batch size per GPU/CPU for evaluation." , )
parser.add_argument(
"--recalculate" , help="Recalculate predictions even if the prediction file exists" , action="store_true" , )
parser.add_argument(
"--num_beams" , default=4 , type=snake_case_ , help="Number of beams to be used when generating answers" , )
parser.add_argument("--min_length" , default=1 , type=snake_case_ , help="Min length of the generated answers" )
parser.add_argument("--max_length" , default=50 , type=snake_case_ , help="Max length of the generated answers" )
parser.add_argument(
"--print_predictions" , action="store_true" , help="If True, prints predictions while evaluating." , )
parser.add_argument(
"--print_docs" , action="store_true" , help="If True, prints docs retried while generating." , )
snake_case__ : Union[str, Any] = parser.parse_args()
snake_case__ : Tuple = torch.device("cuda" if torch.cuda.is_available() else "cpu" )
return args
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] ):
snake_case__ : List[Any] = {}
if args.model_type is None:
snake_case__ : str = infer_model_type(args.model_name_or_path )
assert args.model_type is not None
if args.model_type.startswith("rag" ):
snake_case__ : Any = RagTokenForGeneration if args.model_type == "rag_token" else RagSequenceForGeneration
snake_case__ : Tuple = args.n_docs
if args.index_name is not None:
snake_case__ : Tuple = args.index_name
if args.index_path is not None:
snake_case__ : Optional[Any] = args.index_path
else:
snake_case__ : List[Any] = BartForConditionalGeneration
snake_case__ : List[Any] = (
[f.path for f in os.scandir(args.model_name_or_path ) if f.is_dir()]
if args.eval_all_checkpoints
else [args.model_name_or_path]
)
logger.info("Evaluate the following checkpoints: %s" , snake_case_ )
snake_case__ : Any = get_scores if args.eval_mode == "e2e" else get_precision_at_k
snake_case__ : List[Any] = evaluate_batch_eae if args.eval_mode == "e2e" else evaluate_batch_retrieval
for checkpoint in checkpoints:
if os.path.exists(args.predictions_path ) and (not args.recalculate):
logger.info("Calculating metrics based on an existing predictions file: {}".format(args.predictions_path ) )
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
continue
logger.info("***** Running evaluation for {} *****".format(snake_case_ ) )
logger.info(" Batch size = %d" , args.eval_batch_size )
logger.info(" Predictions will be stored under {}".format(args.predictions_path ) )
if args.model_type.startswith("rag" ):
snake_case__ : Dict = RagRetriever.from_pretrained(snake_case_ , **snake_case_ )
snake_case__ : int = model_class.from_pretrained(snake_case_ , retriever=snake_case_ , **snake_case_ )
model.retriever.init_retrieval()
else:
snake_case__ : Optional[int] = model_class.from_pretrained(snake_case_ , **snake_case_ )
model.to(args.device )
with open(args.evaluation_set , "r" ) as eval_file, open(args.predictions_path , "w" ) as preds_file:
snake_case__ : Union[str, Any] = []
for line in tqdm(snake_case_ ):
questions.append(line.strip() )
if len(snake_case_ ) == args.eval_batch_size:
snake_case__ : Dict = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write("\n".join(snake_case_ ) + "\n" )
preds_file.flush()
snake_case__ : Optional[int] = []
if len(snake_case_ ) > 0:
snake_case__ : Union[str, Any] = evaluate_batch_fn(snake_case_ , snake_case_ , snake_case_ )
preds_file.write("\n".join(snake_case_ ) )
preds_file.flush()
score_fn(snake_case_ , args.predictions_path , args.gold_data_path )
if __name__ == "__main__":
__lowerCamelCase : Dict = get_args()
main(args)
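# Example end-to-end evaluation run (model id and paths are illustrative):
#
#   python eval_rag.py \
#       --model_name_or_path facebook/rag-token-nq \
#       --model_type rag_token \
#       --evaluation_set path/to/test.source \
#       --gold_data_path path/to/gold_data_file \
#       --predictions_path path/to/preds.txt \
#       --eval_mode e2e \
#       --gold_data_mode qa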
| 708 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
                    layer_index = name.split(key )[0].split("." )[-2]
                    snake_case__ : Any = mapped_key.replace("*" , layer_index )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
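    # type_id 0 refers to the conv module itself; type_id 2 refers to the
    # (group/layer) norm attached to a conv layer, handled by the branches below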
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
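# Example invocation for a pretrained (not fine-tuned) checkpoint (script name and
# paths are hypothetical; the flags match the parser above):
#
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path /path/to/output_dir \
#       --not_finetuned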
| 25 | 0 |
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase__ , unittest.TestCase ):
"""simple docstring"""
a_ = MgpstrTokenizer
a_ = False
a_ = {}
a_ = False
def _lowercase ( self : Optional[int] ):
super().setUp()
# fmt: off
snake_case__ : Optional[Any] = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
snake_case__ : List[str] = dict(zip(_a , range(len(_a ) ) ) )
snake_case__ : Union[str, Any] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(_a ) + "\n" )
def _lowercase ( self : Tuple , **__A : Tuple ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **_a )
def _lowercase ( self : Optional[int] , __A : Union[str, Any] ):
snake_case__ : str = """tester"""
snake_case__ : Union[str, Any] = """tester"""
return input_text, output_text
@unittest.skip("MGP-STR always lower cases letters." )
def _lowercase ( self : Optional[Any] ):
pass
def _lowercase ( self : Optional[Any] ):
snake_case__ : Optional[Any] = self.get_tokenizers(do_lower_case=_a )
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
snake_case__ : int = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({"cls_token": special_token} )
snake_case__ : Union[str, Any] = tokenizer.encode([special_token] , add_special_tokens=_a )
self.assertEqual(len(_a ) , 1 )
snake_case__ : Optional[int] = tokenizer.decode(_a , skip_special_tokens=_a )
self.assertTrue(special_token not in decoded )
def _lowercase ( self : Tuple ):
snake_case__ : Dict = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(f'''{tokenizer.__class__.__name__}''' ):
snake_case__ : Any = self.get_input_output_texts(_a )
snake_case__ : str = tokenizer.tokenize(_a )
snake_case__ : List[Any] = tokenizer.convert_tokens_to_ids(_a )
snake_case__ : Union[str, Any] = tokenizer.encode(_a , add_special_tokens=_a )
self.assertListEqual(_a , _a )
snake_case__ : List[Any] = tokenizer.convert_ids_to_tokens(_a )
self.assertNotEqual(len(_a ) , 0 )
snake_case__ : Optional[Any] = tokenizer.decode(_a )
self.assertIsInstance(_a , _a )
self.assertEqual(text_a.replace(" " , "" ) , _a )
@unittest.skip("MGP-STR tokenizer only handles one sequence." )
def _lowercase ( self : Tuple ):
pass
@unittest.skip("inputs cannot be pretokenized in MgpstrTokenizer" )
def _lowercase ( self : int ):
pass
| 709 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
snake_case__, snake_case__ : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de dรฉbat sur le renseignement",
"Selon moi, il y a deux niveaux de rรฉponse de la part du gouvernement franรงais.",
"Lorsque Franรงois Hollande tรฉlรฉphone ร Barack Obama ou quand le ministre des affaires รฉtrangรจres Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils rรฉagissent ร une vraie dรฉcouverte, qui est celle de"
" l'ampleur de la surveillance amรฉricaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When Franรงois Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
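# Hedged usage sketch (not part of the test): the M2M100-specific step above is forcing
# the target language as the first generated token. For any supported language pair it
# reduces to the lines below; the sentence and language codes are placeholders.
#
#   tokenizer.src_lang = "fr"
#   batch = tokenizer(["Bonjour le monde"], return_tensors="pt")
#   generated = model.generate(**batch, forced_bos_token_id=tokenizer.get_lang_id("en"))
#   tokenizer.batch_decode(generated, skip_special_tokens=True)  # -> English translation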
| 25 | 0 |
import tempfile
import torch
from diffusers import (
DEISMultistepScheduler,
DPMSolverMultistepScheduler,
DPMSolverSinglestepScheduler,
UniPCMultistepScheduler,
)
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( SchedulerCommonTest ):
"""simple docstring"""
a_ = (UniPCMultistepScheduler,)
a_ = (("num_inference_steps", 2_5),)
def _lowercase ( self : Tuple , **__A : Dict ):
snake_case__ : Optional[int] = {
'''num_train_timesteps''': 1_0_0_0,
'''beta_start''': 0.0_0_0_1,
'''beta_end''': 0.0_2,
'''beta_schedule''': '''linear''',
'''solver_order''': 2,
'''solver_type''': '''bh2''',
}
config.update(**UpperCamelCase__ )
return config
def _lowercase ( self : Tuple , __A : Tuple=0 , **__A : Any ):
snake_case__ : List[Any] = dict(self.forward_default_kwargs )
snake_case__ : List[Any] = kwargs.pop("num_inference_steps" , UpperCamelCase__ )
snake_case__ : Optional[int] = self.dummy_sample
snake_case__ : Tuple = 0.1 * sample
snake_case__ : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case__ : int = self.get_scheduler_config(**UpperCamelCase__ )
snake_case__ : Optional[int] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
snake_case__ : Dict = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
snake_case__ : Optional[int] = scheduler_class.from_pretrained(UpperCamelCase__ )
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals
snake_case__ : Dict = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case__, snake_case__ : Union[str, Any] = sample, sample
for t in range(UpperCamelCase__ , time_step + scheduler.config.solver_order + 1 ):
snake_case__ : int = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
snake_case__ : Dict = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowercase ( self : Dict , __A : Optional[Any]=0 , **__A : Dict ):
snake_case__ : int = dict(self.forward_default_kwargs )
snake_case__ : List[Any] = kwargs.pop("num_inference_steps" , UpperCamelCase__ )
snake_case__ : int = self.dummy_sample
snake_case__ : Optional[int] = 0.1 * sample
snake_case__ : str = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
for scheduler_class in self.scheduler_classes:
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : Union[str, Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ : List[str] = dummy_past_residuals[: scheduler.config.solver_order]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(UpperCamelCase__ )
snake_case__ : Dict = scheduler_class.from_pretrained(UpperCamelCase__ )
# set timesteps on the restored scheduler (required before copying residuals)
new_scheduler.set_timesteps(UpperCamelCase__ )
# copy over dummy past residual (must be after setting timesteps)
snake_case__ : int = dummy_past_residuals[: new_scheduler.config.solver_order]
snake_case__ : Optional[Any] = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
snake_case__ : Any = new_scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowercase ( self : int , __A : List[Any]=None , **__A : Optional[int] ):
if scheduler is None:
snake_case__ : List[str] = self.scheduler_classes[0]
snake_case__ : str = self.get_scheduler_config(**UpperCamelCase__ )
snake_case__ : Dict = scheduler_class(**UpperCamelCase__ )
snake_case__ : Dict = self.scheduler_classes[0]
snake_case__ : Tuple = self.get_scheduler_config(**UpperCamelCase__ )
snake_case__ : List[str] = scheduler_class(**UpperCamelCase__ )
snake_case__ : Tuple = 1_0
snake_case__ : Tuple = self.dummy_model()
snake_case__ : int = self.dummy_sample_deter
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : Optional[Any] = model(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : Tuple = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
return sample
def _lowercase ( self : Dict ):
snake_case__ : int = dict(self.forward_default_kwargs )
snake_case__ : Dict = kwargs.pop("num_inference_steps" , UpperCamelCase__ )
for scheduler_class in self.scheduler_classes:
snake_case__ : Dict = self.get_scheduler_config()
snake_case__ : List[Any] = scheduler_class(**UpperCamelCase__ )
snake_case__ : List[str] = self.dummy_sample
snake_case__ : Dict = 0.1 * sample
if num_inference_steps is not None and hasattr(UpperCamelCase__ , "set_timesteps" ):
scheduler.set_timesteps(UpperCamelCase__ )
elif num_inference_steps is not None and not hasattr(UpperCamelCase__ , "set_timesteps" ):
snake_case__ : Any = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case__ : int = [residual + 0.2, residual + 0.1_5, residual + 0.1_0]
snake_case__ : Tuple = dummy_past_residuals[: scheduler.config.solver_order]
snake_case__ : Union[str, Any] = scheduler.timesteps[5]
snake_case__ : Optional[int] = scheduler.timesteps[6]
snake_case__ : Dict = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
snake_case__ : Tuple = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ , **UpperCamelCase__ ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowercase ( self : Tuple ):
# make sure that iterating over schedulers with same config names gives same results
# for defaults
snake_case__ : Optional[Any] = UniPCMultistepScheduler(**self.get_scheduler_config() )
snake_case__ : int = self.full_loop(scheduler=UpperCamelCase__ )
snake_case__ : List[Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
snake_case__ : Union[str, Any] = DPMSolverSinglestepScheduler.from_config(scheduler.config )
snake_case__ : Any = DEISMultistepScheduler.from_config(scheduler.config )
snake_case__ : Union[str, Any] = DPMSolverMultistepScheduler.from_config(scheduler.config )
snake_case__ : str = UniPCMultistepScheduler.from_config(scheduler.config )
snake_case__ : Union[str, Any] = self.full_loop(scheduler=UpperCamelCase__ )
snake_case__ : Union[str, Any] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def _lowercase ( self : Tuple ):
for timesteps in [2_5, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=UpperCamelCase__ )
def _lowercase ( self : int ):
self.check_over_configs(thresholding=UpperCamelCase__ )
for order in [1, 2, 3]:
for solver_type in ["bh1", "bh2"]:
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
thresholding=UpperCamelCase__ , prediction_type=UpperCamelCase__ , sample_max_value=UpperCamelCase__ , solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , )
def _lowercase ( self : Optional[int] ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=UpperCamelCase__ )
def _lowercase ( self : Any ):
for solver_type in ["bh1", "bh2"]:
for order in [1, 2, 3]:
for prediction_type in ["epsilon", "sample"]:
self.check_over_configs(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , )
snake_case__ : List[str] = self.full_loop(
solver_order=UpperCamelCase__ , solver_type=UpperCamelCase__ , prediction_type=UpperCamelCase__ , )
assert not torch.isnan(UpperCamelCase__ ).any(), "Samples have nan numbers"
def _lowercase ( self : int ):
self.check_over_configs(lower_order_final=UpperCamelCase__ )
self.check_over_configs(lower_order_final=UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] ):
for num_inference_steps in [1, 2, 3, 5, 1_0, 5_0, 1_0_0, 9_9_9, 1_0_0_0]:
self.check_over_forward(num_inference_steps=UpperCamelCase__ , time_step=0 )
def _lowercase ( self : Dict ):
snake_case__ : Optional[int] = self.full_loop()
snake_case__ : Optional[int] = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.2_4_6_4 ) < 1e-3
def _lowercase ( self : List[Any] ):
snake_case__ : str = self.full_loop(prediction_type="v_prediction" )
snake_case__ : str = torch.mean(torch.abs(UpperCamelCase__ ) )
assert abs(result_mean.item() - 0.1_0_1_4 ) < 1e-3
def _lowercase ( self : Optional[Any] ):
snake_case__ : Tuple = self.scheduler_classes[0]
snake_case__ : Any = self.get_scheduler_config(thresholding=UpperCamelCase__ , dynamic_thresholding_ratio=0 )
snake_case__ : Optional[Any] = scheduler_class(**UpperCamelCase__ )
snake_case__ : int = 1_0
snake_case__ : str = self.dummy_model()
snake_case__ : str = self.dummy_sample_deter.half()
scheduler.set_timesteps(UpperCamelCase__ )
for i, t in enumerate(scheduler.timesteps ):
snake_case__ : List[Any] = model(UpperCamelCase__ , UpperCamelCase__ )
snake_case__ : int = scheduler.step(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ ).prev_sample
assert sample.dtype == torch.floataa
def _lowercase ( self : Optional[Any] , **__A : Dict ):
for scheduler_class in self.scheduler_classes:
snake_case__ : List[Any] = self.get_scheduler_config(**UpperCamelCase__ )
snake_case__ : Optional[Any] = scheduler_class(**UpperCamelCase__ )
scheduler.set_timesteps(scheduler.config.num_train_timesteps )
assert len(scheduler.timesteps.unique() ) == scheduler.num_inference_steps
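# Hedged usage sketch (assumes `diffusers` is installed): a minimal denoising loop with
# UniPCMultistepScheduler outside the test harness. The zero-noise "model" below stands
# in for a real UNet and is purely illustrative.
def _unipc_demo():
    scheduler = UniPCMultistepScheduler(num_train_timesteps=1_000, solver_order=2, solver_type="bh2")
    scheduler.set_timesteps(10)
    sample = torch.randn(1, 3, 8, 8)
    for t in scheduler.timesteps:
        noise_pred = torch.zeros_like(sample)  # stand-in for model(sample, t)
        sample = scheduler.step(noise_pred, t, sample).prev_sample
    return sample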
| 710 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : Optional[int] = []
for part_id in partition_order:
snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 )
snake_case__ : Any = Spark(snake_case_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 )
snake_case__ : Optional[Any] = [1, 0]
snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions.
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case__, snake_case__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[int] = spark.range(10 ).repartition(1 )
snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
snake_case__ : Union[str, Any] = lambda snake_case_ : x.reverse()
snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(100 ).repartition(1 )
snake_case__ : Union[str, Any] = Spark(snake_case_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
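# Hedged usage sketch: the expected-rows helper exercised by hand on a tiny local
# DataFrame. Assumes a local pyspark installation;
# `_get_expected_row_ids_and_row_dicts_for_partition_order` is the upstream name that
# the test bodies above already use for the first helper in this file.
def _spark_demo():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("demo").getOrCreate()
    df = spark.range(6).repartition(2)
    # Each entry is ("<partition_id>_<row_idx>", {"id": <value>}), in the given partition order.
    return _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 0])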
| 25 | 0 |
from __future__ import annotations
__lowerCamelCase : List[str] = list[tuple[int, int]]
__lowerCamelCase : Union[str, Any] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Optional[Any] = ([-1, 0], [0, -1], [1, 0], [0, 1]) # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Tuple , __A : List[Any] , __A : List[Any] , __A : List[Any] , __A : Tuple , __A : Optional[int] , __A : Tuple , ):
snake_case__ : Optional[Any] = pos_x
snake_case__ : List[str] = pos_y
snake_case__ : Any = (pos_y, pos_x)
snake_case__ : Any = goal_x
snake_case__ : str = goal_y
snake_case__ : int = g_cost
snake_case__ : Tuple = parent
snake_case__ : Optional[int] = self.calculate_heuristic()
def _lowercase ( self : List[Any] ):
snake_case__ : Union[str, Any] = abs(self.pos_x - self.goal_x )
snake_case__ : Tuple = abs(self.pos_y - self.goal_y )
return dx + dy
def __lt__( self : Any , __A : Union[str, Any] ):
return self.f_cost < other.f_cost
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : str , __A : Any , __A : List[Any] ):
snake_case__ : Union[str, Any] = Node(start[1] , start[0] , goal[1] , goal[0] , 0 , _lowerCamelCase )
snake_case__ : int = Node(goal[1] , goal[0] , goal[1] , goal[0] , 9_9_9_9_9 , _lowerCamelCase )
snake_case__ : Tuple = [self.start]
snake_case__ : str = []
snake_case__ : Optional[Any] = False
def _lowercase ( self : List[Any] ):
while self.open_nodes:
# Open Nodes are sorted using __lt__
self.open_nodes.sort()
snake_case__ : Union[str, Any] = self.open_nodes.pop(0 )
if current_node.pos == self.target.pos:
snake_case__ : str = True
return self.retrace_path(_lowerCamelCase )
self.closed_nodes.append(_lowerCamelCase )
snake_case__ : Union[str, Any] = self.get_successors(_lowerCamelCase )
for child_node in successors:
if child_node in self.closed_nodes:
continue
if child_node not in self.open_nodes:
self.open_nodes.append(_lowerCamelCase )
else:
# retrieve the best current path
snake_case__ : str = self.open_nodes.pop(self.open_nodes.index(_lowerCamelCase ) )
if child_node.g_cost < better_node.g_cost:
self.open_nodes.append(_lowerCamelCase )
else:
self.open_nodes.append(_lowerCamelCase )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Optional[Any] , __A : int ):
snake_case__ : Optional[int] = []
for action in delta:
snake_case__ : Optional[Any] = parent.pos_x + action[1]
snake_case__ : Tuple = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(_lowerCamelCase ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(
_lowerCamelCase , _lowerCamelCase , self.target.pos_y , self.target.pos_x , parent.g_cost + 1 , _lowerCamelCase , ) )
return successors
def _lowercase ( self : Tuple , __A : List[str] ):
snake_case__ : Any = node
snake_case__ : Optional[Any] = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case__ : int = current_node.parent
path.reverse()
return path
if __name__ == "__main__":
__lowerCamelCase : int = (0, 0)
__lowerCamelCase : Dict = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
print("""------""")
__lowerCamelCase : Optional[int] = GreedyBestFirst(init, goal)
__lowerCamelCase : Tuple = greedy_bf.search()
if path:
for pos_x, pos_y in path:
__lowerCamelCase : List[Any] = 2
for elem in grid:
print(elem)
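# Hedged worked example (illustrative only): the Manhattan heuristic driving the node
# ordering above, computed standalone. For start (0, 0) and goal (6, 6) it yields
# |0 - 6| + |0 - 6| = 12; greedy best-first always expands the open node with the
# smallest heuristic, so it is fast but does not guarantee a shortest path.
def manhattan_distance(pos: tuple[int, int], goal: tuple[int, int]) -> int:
    return abs(pos[0] - goal[0]) + abs(pos[1] - goal[1])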
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
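# Hedged usage sketch: what the _LazyModule indirection buys. Importing the package is
# cheap; a backend-specific submodule is loaded only when one of its symbols is first
# touched. The attribute access below is illustrative.
#
#   from transformers.models.xlnet import XLNetConfig  # loads configuration_xlnet only
#   config = XLNetConfig(d_model=256, n_layer=4)        # no torch/tf import has happened yet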
| 25 | 0 |
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Optional[int] ):
snake_case__ : List[Any] = TFCamembertModel.from_pretrained("jplu/tf-camembert-base" )
snake_case__ : List[Any] = tf.convert_to_tensor(
[[5, 1_2_1, 1_1, 6_6_0, 1_6, 7_3_0, 2_5_5_4_3, 1_1_0, 8_3, 6]] , dtype=tf.intaa , ) # J'aime le camembert !"
snake_case__ : Optional[int] = model(__A )["last_hidden_state"]
snake_case__ : Optional[Any] = tf.TensorShape((1, 1_0, 7_6_8) )
self.assertEqual(output.shape , __A )
# compare the actual values for a slice.
snake_case__ : Optional[int] = tf.convert_to_tensor(
[[[-0.0_2_5_4, 0.0_2_3_5, 0.1_0_2_7], [0.0_6_0_6, -0.1_8_1_1, -0.0_4_1_8], [-0.1_5_6_1, -0.1_1_2_7, 0.2_6_8_7]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = roberta.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1e-4 ) )
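# Hedged companion sketch (not executed here): producing the hard-coded input ids above
# from raw text instead. The tokenizer checkpoint name is an assumption.
#
#   from transformers import CamembertTokenizer
#   tok = CamembertTokenizer.from_pretrained("camembert-base")
#   enc = tok("J'aime le camembert !", return_tensors="tf")
#   out = TFCamembertModel.from_pretrained("jplu/tf-camembert-base")(enc["input_ids"])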
| 712 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
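# Hedged usage sketch of the two-stage flow these exports enable: the prior maps text to
# image embeddings, which the main pipeline decodes into pixels. Checkpoint names are
# assumptions; substitute the weights you actually use.
#
#   prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
#   image_embeds, negative_embeds = prior("a red fox, watercolor").to_tuple()
#   pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
#   image = pipe("a red fox, watercolor", image_embeds=image_embeds,
#                negative_image_embeds=negative_embeds).images[0]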
| 25 | 0 |
import flax.linen as nn
import jax.numpy as jnp
from .attention_flax import FlaxTransformeraDModel
from .resnet_flax import FlaxDownsampleaD, FlaxResnetBlockaD, FlaxUpsampleaD
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
a_ = 42
a_ = 42
a_ = 0.0
a_ = 1
a_ = 1
a_ = True
a_ = False
a_ = False
a_ = False
a_ = jnp.floataa
def _lowercase ( self : List[str] ):
snake_case__ : int = []
snake_case__ : List[Any] = []
for i in range(self.num_layers ):
snake_case__ : str = self.in_channels if i == 0 else self.out_channels
snake_case__ : str = FlaxResnetBlockaD(
in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
snake_case__ : Any = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
snake_case__ : Any = resnets
snake_case__ : int = attentions
if self.add_downsample:
snake_case__ : int = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Tuple , __A : Optional[Any] , __A : Any , __A : List[Any] , __A : Optional[Any]=True ):
snake_case__ : int = ()
for resnet, attn in zip(self.resnets , self.attentions ):
snake_case__ : str = resnet(__A , __A , deterministic=__A )
snake_case__ : Tuple = attn(__A , __A , deterministic=__A )
output_states += (hidden_states,)
if self.add_downsample:
snake_case__ : Optional[int] = self.downsamplers_a(__A )
output_states += (hidden_states,)
return hidden_states, output_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
a_ = 42
a_ = 42
a_ = 0.0
a_ = 1
a_ = True
a_ = jnp.floataa
def _lowercase ( self : List[str] ):
snake_case__ : Optional[int] = []
for i in range(self.num_layers ):
snake_case__ : int = self.in_channels if i == 0 else self.out_channels
snake_case__ : int = FlaxResnetBlockaD(
in_channels=__A , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
snake_case__ : Any = resnets
if self.add_downsample:
snake_case__ : Optional[Any] = FlaxDownsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Optional[Any] , __A : str , __A : Tuple , __A : Optional[Any]=True ):
snake_case__ : List[Any] = ()
for resnet in self.resnets:
snake_case__ : Optional[int] = resnet(__A , __A , deterministic=__A )
output_states += (hidden_states,)
if self.add_downsample:
snake_case__ : List[str] = self.downsamplers_a(__A )
output_states += (hidden_states,)
return hidden_states, output_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
a_ = 42
a_ = 42
a_ = 42
a_ = 0.0
a_ = 1
a_ = 1
a_ = True
a_ = False
a_ = False
a_ = False
a_ = jnp.floataa
def _lowercase ( self : Tuple ):
snake_case__ : Any = []
snake_case__ : List[str] = []
for i in range(self.num_layers ):
snake_case__ : Optional[int] = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case__ : Optional[Any] = self.prev_output_channel if i == 0 else self.out_channels
snake_case__ : Dict = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
snake_case__ : Any = FlaxTransformeraDModel(
in_channels=self.out_channels , n_heads=self.num_attention_heads , d_head=self.out_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , only_cross_attention=self.only_cross_attention , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
snake_case__ : Optional[int] = resnets
snake_case__ : Dict = attentions
if self.add_upsample:
snake_case__ : str = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : Tuple , __A : Union[str, Any] , __A : Dict , __A : int , __A : str , __A : int=True ):
for resnet, attn in zip(self.resnets , self.attentions ):
# pop res hidden states
snake_case__ : str = res_hidden_states_tuple[-1]
snake_case__ : str = res_hidden_states_tuple[:-1]
snake_case__ : Tuple = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case__ : Union[str, Any] = resnet(__A , __A , deterministic=__A )
snake_case__ : Union[str, Any] = attn(__A , __A , deterministic=__A )
if self.add_upsample:
snake_case__ : Optional[int] = self.upsamplers_a(__A )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
a_ = 42
a_ = 42
a_ = 42
a_ = 0.0
a_ = 1
a_ = True
a_ = jnp.floataa
def _lowercase ( self : str ):
snake_case__ : Union[str, Any] = []
for i in range(self.num_layers ):
snake_case__ : int = self.in_channels if (i == self.num_layers - 1) else self.out_channels
snake_case__ : Dict = self.prev_output_channel if i == 0 else self.out_channels
snake_case__ : Dict = FlaxResnetBlockaD(
in_channels=resnet_in_channels + res_skip_channels , out_channels=self.out_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
snake_case__ : Any = resnets
if self.add_upsample:
snake_case__ : List[str] = FlaxUpsampleaD(self.out_channels , dtype=self.dtype )
def __call__( self : str , __A : Optional[int] , __A : Optional[Any] , __A : List[Any] , __A : int=True ):
for resnet in self.resnets:
# pop res hidden states
snake_case__ : Optional[int] = res_hidden_states_tuple[-1]
snake_case__ : Optional[int] = res_hidden_states_tuple[:-1]
snake_case__ : List[Any] = jnp.concatenate((hidden_states, res_hidden_states) , axis=-1 )
snake_case__ : List[str] = resnet(__A , __A , deterministic=__A )
if self.add_upsample:
snake_case__ : Tuple = self.upsamplers_a(__A )
return hidden_states
class SCREAMING_SNAKE_CASE__ ( nn.Module ):
"""simple docstring"""
a_ = 42
a_ = 0.0
a_ = 1
a_ = 1
a_ = False
a_ = False
a_ = jnp.floataa
def _lowercase ( self : int ):
# there is always at least one resnet
snake_case__ : Dict = [
FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
]
snake_case__ : Tuple = []
for _ in range(self.num_layers ):
snake_case__ : str = FlaxTransformeraDModel(
in_channels=self.in_channels , n_heads=self.num_attention_heads , d_head=self.in_channels // self.num_attention_heads , depth=1 , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
attentions.append(__A )
snake_case__ : int = FlaxResnetBlockaD(
in_channels=self.in_channels , out_channels=self.in_channels , dropout_prob=self.dropout , dtype=self.dtype , )
resnets.append(__A )
snake_case__ : str = resnets
snake_case__ : str = attentions
def __call__( self : Optional[Any] , __A : int , __A : Optional[int] , __A : Optional[int] , __A : Optional[Any]=True ):
snake_case__ : Optional[Any] = self.resnets[0](__A , __A )
for attn, resnet in zip(self.attentions , self.resnets[1:] ):
snake_case__ : List[Any] = attn(__A , __A , deterministic=__A )
snake_case__ : int = resnet(__A , __A , deterministic=__A )
return hidden_states
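# Hedged usage sketch: these modules mirror diffusers' Flax UNet blocks (FlaxDownBlock2D
# and friends). A minimal init/apply round trip against the upstream implementation,
# with illustrative NHWC shapes; the module path is an assumption about your diffusers
# version.
def _down_block_demo():
    import jax
    from diffusers.models.unet_2d_blocks_flax import FlaxDownBlock2D
    block = FlaxDownBlock2D(in_channels=32, out_channels=32, dropout=0.0, num_layers=1)
    hidden = jnp.zeros((1, 16, 16, 32))  # NHWC sample, channels == in_channels
    temb = jnp.zeros((1, 128))           # time embedding
    params = block.init(jax.random.PRNGKey(0), hidden, temb)
    return block.apply(params, hidden, temb)  # -> (hidden_states, skip_states)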
| 713 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def SCREAMING_SNAKE_CASE ( snake_case_ : dict ):
return (data["data"], data["target"])
def SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ):
snake_case__ : Optional[int] = XGBClassifier()
classifier.fit(snake_case_ , snake_case_ )
return classifier
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = load_iris()
snake_case__, snake_case__ : str = data_handling(snake_case_ )
snake_case__, snake_case__, snake_case__, snake_case__ : int = train_test_split(
snake_case_ , snake_case_ , test_size=0.25 )
snake_case__ : Dict = iris["target_names"]
# Create an XGBoost Classifier from the training data
snake_case__ : Dict = xgboost(snake_case_ , snake_case_ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
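# Hedged follow-on sketch: the fitted classifier predicts integer class indices that map
# back into `target_names`. `data_handling` and `xgboost` are the upstream names the
# calls above already use for the helpers in this file; illustrative only.
def predict_first_rows():
    features, targets = data_handling(load_iris())
    classifier = xgboost(features, targets)
    names = load_iris()["target_names"]
    return [names[i] for i in classifier.predict(features[:3])]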
| 25 | 0 |
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class SCREAMING_SNAKE_CASE__ ( PipelineTool ):
"""simple docstring"""
a_ = "Salesforce/blip-image-captioning-base"
a_ = (
"This is a tool that generates a description of an image. It takes an input named `image` which should be the "
"image to caption, and returns a text that contains the description in English."
)
a_ = "image_captioner"
a_ = AutoModelForVisionaSeq
a_ = ["image"]
a_ = ["text"]
def __init__( self : Tuple , *__A : str , **__A : Tuple ):
requires_backends(self , ["vision"] )
super().__init__(*__A , **__A )
def _lowercase ( self : Optional[int] , __A : "Image" ):
return self.pre_processor(images=__A , return_tensors="pt" )
def _lowercase ( self : List[Any] , __A : Optional[Any] ):
return self.model.generate(**__A )
def _lowercase ( self : Tuple , __A : List[str] ):
return self.pre_processor.batch_decode(__A , skip_special_tokens=__A )[0].strip()
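# Hedged usage sketch: PipelineTool subclasses are callable end to end
# (encode -> forward -> decode), so the captioner above reduces to:
#
#   from PIL import Image
#   tool = ImageCaptioningTool()           # upstream name of the class defined above
#   print(tool(Image.open("photo.jpg")))   # -> an English caption string
#
# "photo.jpg" is a placeholder; the BLIP checkpoint downloads on first use.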
| 714 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ):
snake_case__ : Tuple = args.log_outputs
snake_case__ : Union[str, Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
snake_case__ : List[str] = load_metric("wer" )
snake_case__ : List[str] = load_metric("cer" )
# compute metrics
snake_case__ : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
snake_case__ : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
snake_case__ : Dict = F'''WER: {wer_result}\nCER: {cer_result}'''
print(snake_case_ )
with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f:
f.write(snake_case_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
snake_case__ : Union[str, Any] = F'''log_{dataset_id}_predictions.txt'''
snake_case__ : int = F'''log_{dataset_id}_targets.txt'''
with open(snake_case_ , "w" ) as p, open(snake_case_ , "w" ) as t:
# mapping function to write output
def write_to_file(snake_case_ : Union[str, Any] , snake_case_ : Any ):
p.write(F'''{i}''' + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(F'''{i}''' + "\n" )
t.write(batch["target"] + "\n" )
result.map(snake_case_ , with_indices=snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : List[Any] = "[,?.!\-\;\:\"โ%โโ๏ฟฝโโโฆโ]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
snake_case__ : Optional[int] = re.sub(snake_case_ , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
snake_case__ : Optional[Any] = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
snake_case__ : Optional[int] = " ".join(text.split(snake_case_ ) )
return text
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# load dataset
snake_case__ : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ )
# for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
snake_case__ : List[str] = AutoFeatureExtractor.from_pretrained(args.model_id )
snake_case__ : List[Any] = feature_extractor.sampling_rate
# resample audio
snake_case__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case_ ) )
# load eval pipeline
if args.device is None:
snake_case__ : int = 0 if torch.cuda.is_available() else -1
snake_case__ : List[str] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case_ : Any ):
snake_case__ : Union[str, Any] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
snake_case__ : Optional[int] = prediction["text"]
snake_case__ : Optional[Any] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
snake_case__ : Any = dataset.map(snake_case_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case_ , snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with ๐ค Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with ๐ค Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__lowerCamelCase : str = parser.parse_args()
main(args)
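# Hedged invocation sketch: a typical command line for this script. The model and
# dataset identifiers below are placeholders -- substitute your own checkpoint, dataset,
# and split.
#
#   python eval.py --model_id facebook/wav2vec2-base-960h \
#       --dataset mozilla-foundation/common_voice_8_0 --config en --split test \
#       --chunk_length_s 5.0 --stride_length_s 1.0 --log_outputs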
| 25 | 0 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] ): # This function is recursive
snake_case__ : List[str] = len(_UpperCAmelCase )
# If the array contains only one element, we return it (it's the stop condition of
# recursion)
if array_length <= 1:
return array
# Else
snake_case__ : str = array[0]
snake_case__ : int = False
snake_case__ : Dict = 1
snake_case__ : List[Any] = []
while not is_found and i < array_length:
if array[i] < pivot:
snake_case__ : List[Any] = True
snake_case__ : Optional[int] = [element for element in array[i:] if element >= array[i]]
snake_case__ : Dict = longest_subsequence(_UpperCAmelCase )
if len(_UpperCAmelCase ) > len(_UpperCAmelCase ):
snake_case__ : Optional[Any] = temp_array
else:
i += 1
snake_case__ : Dict = [element for element in array[1:] if element >= pivot]
snake_case__ : List[str] = [pivot, *longest_subsequence(_UpperCAmelCase )]
if len(_UpperCAmelCase ) > len(_UpperCAmelCase ):
return temp_array
else:
return longest_subseq
if __name__ == "__main__":
import doctest
doctest.testmod()
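# Hedged worked example: for [10, 22, 9, 33, 21, 50, 41, 60, 22] the function returns
# [10, 22, 33, 50, 60], one longest non-decreasing subsequence (length 5). Note the
# pivot-and-recurse strategy is exponential in the worst case; the O(n log n)
# patience-sorting variant is preferable for large inputs.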
| 715 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True )
class SCREAMING_SNAKE_CASE__ ( TaskTemplate ):
"""simple docstring"""
a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a_ = Features({"text": Value("string" )} )
a_ = Features({"labels": ClassLabel} )
a_ = "text"
a_ = "labels"
def _lowercase ( self : Tuple , __A : List[Any] ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
snake_case__ : Any = copy.deepcopy(self )
snake_case__ : Optional[Any] = self.label_schema.copy()
snake_case__ : List[str] = features[self.label_column]
snake_case__ : Dict = label_schema
return task_template
@property
def _lowercase ( self : Tuple ):
return {
self.text_column: "text",
self.label_column: "labels",
}
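# Hedged usage sketch (upstream names assumed: `TextClassification` for the class above
# and `align_with_features` for its first method): aligning the template with a
# dataset's features specialises the label schema to that dataset's ClassLabel.
#
#   features = Features({"text": Value("string"), "labels": ClassLabel(names=["neg", "pos"])})
#   task = TextClassification(text_column="text", label_column="labels")
#   task = task.align_with_features(features)  # label schema now carries ["neg", "pos"]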
| 25 | 0 |
import numpy as np
import torch
from torch.utils.data import DataLoader
from accelerate.utils.dataclasses import DistributedType
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : List[str]=2 , __A : Dict=3 , __A : List[Any]=6_4 , __A : Dict=None ):
snake_case__ : List[Any] = np.random.default_rng(__a )
snake_case__ : Dict = length
snake_case__ : str = rng.normal(size=(length,) ).astype(np.floataa )
snake_case__ : int = a * self.x + b + rng.normal(scale=0.1 , size=(length,) ).astype(np.floataa )
def __len__( self : List[Any] ):
return self.length
def __getitem__( self : Optional[int] , __A : int ):
return {"x": self.x[i], "y": self.y[i]}
class SCREAMING_SNAKE_CASE__ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : List[str] , __A : int=0 , __A : Union[str, Any]=0 , __A : List[Any]=False ):
super().__init__()
snake_case__ : Optional[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case__ : List[Any] = torch.nn.Parameter(torch.tensor([2, 3] ).float() )
snake_case__ : str = True
def _lowercase ( self : List[Any] , __A : Optional[Any]=None ):
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
snake_case__ : Optional[Any] = False
return x * self.a[0] + self.b[0]
class SCREAMING_SNAKE_CASE__ ( torch.nn.Module ):
"""simple docstring"""
def __init__( self : Any , __A : List[Any]=0 , __A : List[str]=0 , __A : Union[str, Any]=False ):
super().__init__()
snake_case__ : int = torch.nn.Parameter(torch.tensor(__a ).float() )
snake_case__ : Tuple = torch.nn.Parameter(torch.tensor(__a ).float() )
snake_case__ : List[str] = True
def _lowercase ( self : Any , __A : List[str]=None ):
if self.first_batch:
print(f'''Model dtype: {self.a.dtype}, {self.b.dtype}. Input dtype: {x.dtype}''' )
snake_case__ : Dict = False
return x * self.a + self.b
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : int = 16 ):
from datasets import load_dataset
from transformers import AutoTokenizer
snake_case__ : List[str] = AutoTokenizer.from_pretrained("bert-base-cased" )
snake_case__ : Optional[Any] = {'train': 'tests/test_samples/MRPC/train.csv', 'validation': 'tests/test_samples/MRPC/dev.csv'}
snake_case__ : Tuple = load_dataset("csv" , data_files=snake_case_ )
snake_case__ : Optional[int] = datasets['train'].unique("label" )
snake_case__ : Optional[Any] = {v: i for i, v in enumerate(snake_case_ )}
def tokenize_function(snake_case_ : List[Any] ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Tuple = tokenizer(
examples["sentence1"] , examples["sentence2"] , truncation=snake_case_ , max_length=snake_case_ , padding="max_length" )
if "label" in examples:
snake_case__ : Tuple = [label_to_id[l] for l in examples['label']]
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ : List[str] = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=["sentence1", "sentence2", "label"] , )
def collate_fn(snake_case_ : Dict ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(snake_case_ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
snake_case__ : Tuple = DataLoader(tokenized_datasets["train"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=2 )
snake_case__ : Union[str, Any] = DataLoader(tokenized_datasets["validation"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=1 )
return train_dataloader, eval_dataloader
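# Hedged usage sketch wiring the helpers above into an Accelerator (upstream names
# assumed: `RegressionModel` for the module class, `get_dataloaders` for the loader
# factory). The MRPC csv paths baked in above must exist locally for this to run.
#
#   from accelerate import Accelerator
#   accelerator = Accelerator()
#   train_dl, eval_dl = get_dataloaders(accelerator, batch_size=16)
#   model, train_dl, eval_dl = accelerator.prepare(RegressionModel(), train_dl, eval_dl)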
| 716 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
"""simple docstring"""
a_ = "instructblip_vision_model"
def __init__( self : List[Any] , __A : Dict=1_4_0_8 , __A : Tuple=6_1_4_4 , __A : str=3_9 , __A : int=1_6 , __A : str=2_2_4 , __A : Any=1_4 , __A : Dict="gelu" , __A : List[Any]=1e-6 , __A : Any=0.0 , __A : List[Any]=1e-1_0 , __A : Union[str, Any]=True , **__A : Tuple , ):
super().__init__(**__A )
snake_case__ : List[str] = hidden_size
snake_case__ : Optional[int] = intermediate_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : str = patch_size
snake_case__ : int = image_size
snake_case__ : int = initializer_range
snake_case__ : Optional[int] = attention_dropout
snake_case__ : str = layer_norm_eps
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Tuple = qkv_bias
@classmethod
def _lowercase ( cls : List[str] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : str = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : Union[str, Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
"""simple docstring"""
a_ = "instructblip_qformer"
def __init__( self : Any , __A : Union[str, Any]=3_0_5_2_2 , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=1_2 , __A : Dict=3_0_7_2 , __A : List[str]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_2 , __A : Optional[int]=0.0_2 , __A : List[str]=1e-1_2 , __A : Any=0 , __A : Optional[Any]="absolute" , __A : str=2 , __A : Any=1_4_0_8 , **__A : List[str] , ):
super().__init__(pad_token_id=__A , **__A )
snake_case__ : Dict = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = hidden_act
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : int = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : Dict = cross_attention_frequency
snake_case__ : List[str] = encoder_hidden_size
@classmethod
def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Optional[int] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : Tuple = cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : List[Any] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( PretrainedConfig ):
"""simple docstring"""
a_ = "instructblip"
a_ = True
def __init__( self : List[str] , __A : Optional[Any]=None , __A : Tuple=None , __A : Optional[int]=None , __A : Optional[Any]=3_2 , **__A : Optional[int] ):
super().__init__(**__A )
if vision_config is None:
snake_case__ : Any = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
snake_case__ : Optional[Any] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
snake_case__ : Optional[int] = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
snake_case__ : List[Any] = InstructBlipVisionConfig(**__A )
snake_case__ : Union[str, Any] = InstructBlipQFormerConfig(**__A )
snake_case__ : Dict = text_config["model_type"] if "model_type" in text_config else "opt"
snake_case__ : List[Any] = CONFIG_MAPPING[text_model_type](**__A )
snake_case__ : Union[str, Any] = self.text_config.tie_word_embeddings
snake_case__ : Tuple = self.text_config.is_encoder_decoder
snake_case__ : str = num_query_tokens
snake_case__ : Dict = self.vision_config.hidden_size
snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case__ : int = 1.0
snake_case__ : Optional[int] = 0.0_2
@classmethod
def _lowercase ( cls : List[str] , __A : InstructBlipVisionConfig , __A : InstructBlipQFormerConfig , __A : PretrainedConfig , **__A : int , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = copy.deepcopy(self.__dict__ )
snake_case__ : Optional[Any] = self.vision_config.to_dict()
snake_case__ : List[str] = self.qformer_config.to_dict()
snake_case__ : List[Any] = self.text_config.to_dict()
snake_case__ : List[Any] = self.__class__.model_type
return output
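# Illustrative sketch (not from the original file): a composite config like the
# one above serializes by deep-copying its own attributes and swapping the
# nested sub-configs for their dict form, so `from_dict` can rebuild them.
# All class and field names below are hypothetical stand-ins.
import copy
class _TinySubConfig:
    def __init__(self, hidden_size=8):
        self.hidden_size = hidden_size
    def to_dict(self):
        return dict(self.__dict__)
class _TinyCompositeConfig:
    model_type = "tiny-composite"
    def __init__(self, vision_config=None, num_query_tokens=4):
        self.vision_config = _TinySubConfig(**(vision_config or {}))
        self.num_query_tokens = num_query_tokens
    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
assert _TinyCompositeConfig(vision_config={"hidden_size": 16}).to_dict()["vision_config"] == {"hidden_size": 16}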
| 25 | 0 |
from __future__ import annotations
__lowerCamelCase : Optional[Any] = 1.60_21e-19 # units = C
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] , snake_case_ : Union[str, Any] , ):
if (conductivity, electron_conc, mobility).count(0 ) != 1:
raise ValueError("You cannot supply more or less than 2 values" )
elif conductivity < 0:
raise ValueError("Conductivity cannot be negative" )
elif electron_conc < 0:
raise ValueError("Electron concentration cannot be negative" )
elif mobility < 0:
raise ValueError("mobility cannot be negative" )
elif conductivity == 0:
return (
"conductivity",
mobility * electron_conc * ELECTRON_CHARGE,
)
elif electron_conc == 0:
return (
"electron_conc",
conductivity / (mobility * ELECTRON_CHARGE),
)
else:
return (
"mobility",
conductivity / (electron_conc * ELECTRON_CHARGE),
)
if __name__ == "__main__":
import doctest
doctest.testmod()
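# Worked example (illustrative, not from the source): the function above solves
# sigma = q * n * mu for whichever quantity is passed as zero. With
# electron_conc = 25 and mobility = 100, the conductivity branch yields
# 100 * 25 * 1.6021e-19 ~= 4.005e-16; a clean standalone check:
_Q = 1.6021e-19
assert abs(25.0 * 100.0 * _Q - 4.00525e-16) < 1e-20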
| 717 |
def SCREAMING_SNAKE_CASE ( snake_case_ : list ):
if len(snake_case_ ) <= 1:
return lst
snake_case__ : List[Any] = 1
while i < len(snake_case_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
snake_case__, snake_case__ : Tuple = lst[i], lst[i - 1]
i -= 1
if i == 0:
snake_case__ : Union[str, Any] = 1
return lst
if __name__ == "__main__":
__lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
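# Gnome sort walks the list with one index: step right while the adjacent pair
# is ordered, otherwise swap and step left. Worst case O(n^2), but O(n) on
# already-sorted input. A minimal clean sketch of the same idea (illustrative):
def _gnome_sorted(items: list) -> list:
    out = list(items)
    i = 0
    while i < len(out):
        if i == 0 or out[i - 1] <= out[i]:
            i += 1
        else:
            out[i - 1], out[i] = out[i], out[i - 1]
            i -= 1
    return out
assert _gnome_sorted([3, 1, 2]) == [1, 2, 3]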
| 25 | 0 |
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] ):
snake_case__ : dict[str, TrieNode] = {} # Mapping from char to TrieNode
snake_case__ : List[Any] = False
def _lowercase ( self : str , __A : List[Any] ):
for word in words:
self.insert(_lowercase )
def _lowercase ( self : Tuple , __A : Optional[Any] ):
snake_case__ : Any = self
for char in word:
if char not in curr.nodes:
snake_case__ : Optional[int] = TrieNode()
snake_case__ : List[str] = curr.nodes[char]
snake_case__ : List[Any] = True
def _lowercase ( self : List[Any] , __A : int ):
snake_case__ : Tuple = self
for char in word:
if char not in curr.nodes:
return False
snake_case__ : Optional[int] = curr.nodes[char]
return curr.is_leaf
def _lowercase ( self : Dict , __A : List[Any] ):
def _delete(__A : Dict , __A : List[str] , __A : List[Any] ) -> bool:
if index == len(_lowercase ):
# If word does not exist
if not curr.is_leaf:
return False
snake_case__ : Optional[Any] = False
return len(curr.nodes ) == 0
snake_case__ : Optional[int] = word[index]
snake_case__ : Any = curr.nodes.get(_lowercase )
# If char not in current trie node
if not char_node:
return False
# Flag to check if node can be deleted
snake_case__ : Union[str, Any] = _delete(_lowercase , _lowercase , index + 1 )
if delete_curr:
del curr.nodes[char]
return len(curr.nodes ) == 0
return delete_curr
_delete(self , _lowercase , 0 )
def SCREAMING_SNAKE_CASE ( snake_case_ : TrieNode , snake_case_ : str ):
if node.is_leaf:
print(_lowerCamelCase , end=" " )
for key, value in node.nodes.items():
print_words(_lowerCamelCase , word + key )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Union[str, Any] = """banana bananas bandana band apple all beast""".split()
snake_case__ : List[Any] = TrieNode()
root.insert_many(_lowerCamelCase )
# print_words(root, "")
assert all(root.find(_lowerCamelCase ) for word in words )
assert root.find("banana" )
assert not root.find("bandanas" )
assert not root.find("apps" )
assert root.find("apple" )
assert root.find("all" )
root.delete("all" )
assert not root.find("all" )
root.delete("banana" )
assert not root.find("banana" )
assert root.find("bananas" )
return True
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : bool ):
print(str(_lowerCamelCase ) , "works!" if passes else "doesn't work :(" )
def SCREAMING_SNAKE_CASE ( ):
assert test_trie()
def SCREAMING_SNAKE_CASE ( ):
print_results("Testing trie functionality" , test_trie() )
if __name__ == "__main__":
main()
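# Sketch (illustrative): find/insert above are character-by-character walks from
# the root, and a word counts as present only when the walk ends on a node whose
# leaf flag is set; that is why "banana" can be deleted while "bananas" survives.
_tiny: dict = {}
for _w in ("bananas", "band"):
    _node = _tiny
    for _ch in _w:
        _node = _node.setdefault(_ch, {})
    _node["$"] = True  # leaf marker
assert "$" in _tiny["b"]["a"]["n"]["d"]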
| 718 |
from __future__ import annotations
import time
__lowerCamelCase : str = list[tuple[int, int]]
__lowerCamelCase : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0s are free cells whereas 1s are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : int , __A : int , __A : int , __A : int , __A : Node | None ):
snake_case__ : Optional[int] = pos_x
snake_case__ : Dict = pos_y
snake_case__ : int = (pos_y, pos_x)
snake_case__ : Optional[int] = goal_x
snake_case__ : Tuple = goal_y
snake_case__ : str = parent
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : tuple[int, int] , __A : tuple[int, int] ):
snake_case__ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , __A )
snake_case__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __A )
snake_case__ : int = [self.start]
snake_case__ : Union[str, Any] = False
def _lowercase ( self : Dict ):
while self.node_queue:
snake_case__ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case__ : Optional[Any] = True
return self.retrace_path(__A )
snake_case__ : int = self.get_successors(__A )
for node in successors:
self.node_queue.append(__A )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Union[str, Any] , __A : Node ):
snake_case__ : str = []
for action in delta:
snake_case__ : str = parent.pos_x + action[1]
snake_case__ : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__A , __A , self.target.pos_y , self.target.pos_x , __A ) )
return successors
def _lowercase ( self : Optional[Any] , __A : Node | None ):
snake_case__ : Tuple = node
snake_case__ : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case__ : Tuple = current_node.parent
path.reverse()
return path
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Dict , __A : str , __A : int ):
snake_case__ : str = BreadthFirstSearch(__A , __A )
snake_case__ : int = BreadthFirstSearch(__A , __A )
snake_case__ : Tuple = False
def _lowercase ( self : Optional[Any] ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case__ : Any = self.fwd_bfs.node_queue.pop(0 )
snake_case__ : List[str] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case__ : List[str] = True
return self.retrace_bidirectional_path(
__A , __A )
snake_case__ : Union[str, Any] = current_bwd_node
snake_case__ : Dict = current_fwd_node
snake_case__ : List[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(__A ),
self.bwd_bfs: self.bwd_bfs.get_successors(__A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Any , __A : Node , __A : Node ):
snake_case__ : List[str] = self.fwd_bfs.retrace_path(__A )
snake_case__ : Optional[Any] = self.bwd_bfs.retrace_path(__A )
bwd_path.pop()
bwd_path.reverse()
snake_case__ : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowerCamelCase : str = (0, 0)
__lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase : Any = time.time()
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal)
__lowerCamelCase : str = bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
__lowerCamelCase : Optional[Any] = time.time()
__lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
__lowerCamelCase : str = bd_bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 25 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any ):
snake_case__ : str = tempfile.mkdtemp()
# fmt: off
snake_case__ : Tuple = ["""l""", """o""", """w""", """e""", """r""", """s""", """t""", """i""", """d""", """n""", """lo""", """l</w>""", """w</w>""", """r</w>""", """t</w>""", """low</w>""", """er</w>""", """lowest</w>""", """newer</w>""", """wider""", """<unk>""", """<|startoftext|>""", """<|endoftext|>"""]
# fmt: on
snake_case__ : Optional[int] = dict(zip(A__ , range(len(A__ ) ) ) )
snake_case__ : Union[str, Any] = ["""#version: 0.2""", """l o""", """lo w</w>""", """e r</w>""", """"""]
snake_case__ : Tuple = {"""unk_token""": """<unk>"""}
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : Optional[int] = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["merges_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(A__ ) + "\n" )
with open(self.merges_file , "w" , encoding="utf-8" ) as fp:
fp.write("\n".join(A__ ) )
snake_case__ : Dict = {
"""do_resize""": True,
"""size""": 2_0,
"""do_center_crop""": True,
"""crop_size""": 1_8,
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
}
snake_case__ : Tuple = os.path.join(self.tmpdirname , A__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(A__ , A__ )
def _lowercase ( self : Dict , **__A : Union[str, Any] ):
return CLIPTokenizer.from_pretrained(self.tmpdirname , **A__ )
def _lowercase ( self : List[str] , **__A : int ):
return CLIPTokenizerFast.from_pretrained(self.tmpdirname , **A__ )
def _lowercase ( self : Union[str, Any] , **__A : Optional[Any] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **A__ )
def _lowercase ( self : Optional[int] ):
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Optional[int] ):
snake_case__ : Dict = [np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )]
snake_case__ : int = [Image.fromarray(np.moveaxis(A__ , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _lowercase ( self : Optional[int] ):
snake_case__ : int = self.get_tokenizer()
snake_case__ : int = self.get_rust_tokenizer()
snake_case__ : Any = self.get_image_processor()
snake_case__ : List[Any] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
processor_slow.save_pretrained(self.tmpdirname )
snake_case__ : int = CLIPSegProcessor.from_pretrained(self.tmpdirname , use_fast=A__ )
snake_case__ : int = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
processor_fast.save_pretrained(self.tmpdirname )
snake_case__ : Optional[int] = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer , A__ )
self.assertIsInstance(processor_fast.tokenizer , A__ )
self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor , A__ )
self.assertIsInstance(processor_fast.image_processor , A__ )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = CLIPSegProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Tuple = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case__ : Optional[Any] = self.get_image_processor(do_normalize=A__ , padding_value=1.0 )
snake_case__ : Dict = CLIPSegProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=A__ , padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer , A__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , A__ )
def _lowercase ( self : int ):
snake_case__ : Dict = self.get_image_processor()
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : int = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
snake_case__ : Optional[int] = self.prepare_image_inputs()
snake_case__ : str = image_processor(A__ , return_tensors="np" )
snake_case__ : Optional[int] = processor(images=A__ , return_tensors="np" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self : List[Any] ):
snake_case__ : Union[str, Any] = self.get_image_processor()
snake_case__ : List[Any] = self.get_tokenizer()
snake_case__ : Optional[int] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
snake_case__ : Dict = """lower newer"""
snake_case__ : Dict = processor(text=A__ )
snake_case__ : Union[str, Any] = tokenizer(A__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = self.get_image_processor()
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Union[str, Any] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
snake_case__ : List[Any] = """lower newer"""
snake_case__ : Optional[int] = self.prepare_image_inputs()
snake_case__ : Dict = processor(text=A__ , images=A__ )
self.assertListEqual(list(inputs.keys() ) , ["input_ids", "attention_mask", "pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def _lowercase ( self : Tuple ):
snake_case__ : int = self.get_image_processor()
snake_case__ : Any = self.get_tokenizer()
snake_case__ : Optional[Any] = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
snake_case__ : List[Any] = self.prepare_image_inputs()
snake_case__ : int = self.prepare_image_inputs()
snake_case__ : Union[str, Any] = processor(images=A__ , visual_prompt=A__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "conditional_pixel_values"] )
# test if it raises when no input is passed
with pytest.raises(A__ ):
processor()
def _lowercase ( self : Dict ):
snake_case__ : Optional[Any] = self.get_image_processor()
snake_case__ : str = self.get_tokenizer()
snake_case__ : Tuple = CLIPSegProcessor(tokenizer=A__ , image_processor=A__ )
snake_case__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : List[Any] = processor.batch_decode(A__ )
snake_case__ : int = tokenizer.batch_decode(A__ )
self.assertListEqual(A__ , A__ )
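# Sketch (illustrative): a multimodal processor like the one tested above is a
# thin composition: text goes to the tokenizer, images to the image processor,
# and the outputs are merged into one dict. Hypothetical stand-ins below:
def _process(text=None, images=None, tokenize=len, featurize=len) -> dict:
    if text is None and images is None:
        raise ValueError("You have to specify either text or images.")
    out = {}
    if text is not None:
        out["input_ids"] = tokenize(text)
    if images is not None:
        out["pixel_values"] = featurize(images)
    return out
assert sorted(_process(text="lower newer", images=[0])) == ["input_ids", "pixel_values"]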
| 719 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : str = min_resolution
snake_case__ : Tuple = max_resolution
snake_case__ : List[Any] = do_resize
snake_case__ : Dict = size
snake_case__ : List[str] = do_normalize
snake_case__ : Optional[int] = image_mean
snake_case__ : Optional[int] = image_std
snake_case__ : Any = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : int = do_pad
def _lowercase ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ):
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : Tuple = image.size
else:
snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case__ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case__ : List[Any] = self.size["shortest_edge"]
snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Dict = self.size["shortest_edge"]
snake_case__ : Dict = self.size["shortest_edge"]
else:
snake_case__ : str = []
for image in image_inputs:
snake_case__, snake_case__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : int ):
snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self )
@property
def _lowercase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Any ):
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : List[str] ):
snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : List[Any] ):
# prepare image and target
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : str ):
# prepare image, target and masks_path
snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
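# Sketch (illustrative) of the aspect-ratio-preserving resize mirrored by
# get_expected_values above: the shorter side is scaled to `shortest_edge` and
# the longer side follows proportionally (the longest_edge cap is omitted here).
def _resize_shortest_edge(h: int, w: int, shortest_edge: int = 18) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge
assert _resize_shortest_edge(400, 200) == (36, 18)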
| 25 | 0 |
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : Tuple = (1 + 24 * n) ** 0.5
return ((1 + root) / 6) % 1 == 0
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 5000 ):
snake_case__ : Optional[Any] = [(i * (3 * i - 1)) // 2 for i in range(1 , lowerCAmelCase__ )]
for i, pentagonal_i in enumerate(lowerCAmelCase__ ):
for j in range(lowerCAmelCase__ , len(lowerCAmelCase__ ) ):
snake_case__ : Any = pentagonal_nums[j]
snake_case__ : Tuple = pentagonal_i + pentagonal_j
snake_case__ : Tuple = pentagonal_j - pentagonal_i
if is_pentagonal(lowerCAmelCase__ ) and is_pentagonal(lowerCAmelCase__ ):
return b
return -1
if __name__ == "__main__":
print(f"{solution() = }")
| 720 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCamelCase : str = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback-Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (NeurIPS, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ):
snake_case__ : List[Any] = compute_mauve(
p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , )
return out
| 25 | 0 |
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_torch_available, is_vision_available
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MgpstrProcessor, ViTImageProcessor
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
a_ = ViTImageProcessor if is_vision_available() else None
@property
def _lowercase ( self : Optional[int] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : List[str] ):
snake_case__ : Any = (3, 3_2, 1_2_8)
snake_case__ : str = tempfile.mkdtemp()
# fmt: off
snake_case__ : Union[str, Any] = ["[GO]", "[s]", "0", "1", "2", "3", "4", "5", "6", "7", "8", "9", "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o", "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z"]
# fmt: on
snake_case__ : Dict = dict(zip(UpperCamelCase__ , range(len(UpperCamelCase__ ) ) ) )
snake_case__ : Dict = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["vocab_file"] )
with open(self.vocab_file , "w" , encoding="utf-8" ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + "\n" )
snake_case__ : str = {
"do_normalize": False,
"do_resize": True,
"image_processor_type": "ViTImageProcessor",
"resample": 3,
"size": {"height": 3_2, "width": 1_2_8},
}
snake_case__ : Tuple = os.path.join(self.tmpdirname , UpperCamelCase__ )
with open(self.image_processor_file , "w" , encoding="utf-8" ) as fp:
json.dump(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] , **__A : int ):
return MgpstrTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self : int , **__A : Optional[int] ):
return ViTImageProcessor.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _lowercase ( self : Tuple ):
shutil.rmtree(self.tmpdirname )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Any = np.random.randint(2_5_5 , size=(3, 3_0, 4_0_0) , dtype=np.uinta )
snake_case__ : List[str] = Image.fromarray(np.moveaxis(UpperCamelCase__ , 0 , -1 ) )
return image_input
def _lowercase ( self : Optional[int] ):
snake_case__ : int = self.get_tokenizer()
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : str = MgpstrProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
snake_case__ : List[str] = MgpstrProcessor.from_pretrained(self.tmpdirname , use_fast=UpperCamelCase__ )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def _lowercase ( self : int ):
snake_case__ : Optional[Any] = self.get_tokenizer()
snake_case__ : Tuple = self.get_image_processor()
snake_case__ : Union[str, Any] = MgpstrProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
processor.save_pretrained(self.tmpdirname )
snake_case__ : Optional[Any] = self.get_tokenizer(bos_token="(BOS)" , eos_token="(EOS)" )
snake_case__ : Union[str, Any] = self.get_image_processor(do_normalize=UpperCamelCase__ , padding_value=1.0 )
snake_case__ : Any = MgpstrProcessor.from_pretrained(
self.tmpdirname , bos_token="(BOS)" , eos_token="(EOS)" , do_normalize=UpperCamelCase__ , padding_value=1.0 )
self.assertEqual(processor.char_tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.char_tokenizer , UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor , UpperCamelCase__ )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : List[Any] = self.get_image_processor()
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : int = MgpstrProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case__ : Tuple = self.prepare_image_inputs()
snake_case__ : Union[str, Any] = image_processor(UpperCamelCase__ , return_tensors="np" )
snake_case__ : Union[str, Any] = processor(images=UpperCamelCase__ , return_tensors="np" )
for key in input_image_proc.keys():
self.assertAlmostEqual(input_image_proc[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _lowercase ( self : Tuple ):
snake_case__ : str = self.get_image_processor()
snake_case__ : Tuple = self.get_tokenizer()
snake_case__ : Optional[int] = MgpstrProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case__ : int = "test"
snake_case__ : List[str] = processor(text=UpperCamelCase__ )
snake_case__ : int = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _lowercase ( self : int ):
snake_case__ : str = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : Any = MgpstrProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case__ : List[Any] = "test"
snake_case__ : int = self.prepare_image_inputs()
snake_case__ : Optional[Any] = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , ["pixel_values", "labels"] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def _lowercase ( self : Any ):
snake_case__ : Optional[Any] = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : str = MgpstrProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case__ : str = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9], [3, 4, 3, 1, 1, 8, 9]]
snake_case__ : Optional[int] = processor.char_decode(UpperCamelCase__ )
snake_case__ : Dict = tokenizer.batch_decode(UpperCamelCase__ )
snake_case__ : Optional[int] = [seq.replace(" " , "" ) for seq in decoded_tok]
self.assertListEqual(UpperCamelCase__ , UpperCamelCase__ )
def _lowercase ( self : int ):
snake_case__ : List[str] = self.get_image_processor()
snake_case__ : Optional[int] = self.get_tokenizer()
snake_case__ : Optional[Any] = MgpstrProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case__ : str = None
snake_case__ : Any = self.prepare_image_inputs()
snake_case__ : int = processor(text=UpperCamelCase__ , images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = self.get_image_processor()
snake_case__ : Dict = self.get_tokenizer()
snake_case__ : Optional[int] = MgpstrProcessor(tokenizer=UpperCamelCase__ , image_processor=UpperCamelCase__ )
snake_case__ : Optional[Any] = torch.randn(1 , 2_7 , 3_8 )
snake_case__ : Union[str, Any] = torch.randn(1 , 2_7 , 5_0_2_5_7 )
snake_case__ : Any = torch.randn(1 , 2_7 , 3_0_5_2_2 )
snake_case__ : List[str] = processor.batch_decode([char_input, bpe_input, wp_input] )
self.assertListEqual(list(results.keys() ) , ["generated_text", "scores", "char_preds", "bpe_preds", "wp_preds"] )
| 721 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCamelCase : List[Any] = concatenate_datasets
__lowerCamelCase : List[str] = DownloadConfig
__lowerCamelCase : Union[str, Any] = DownloadManager
__lowerCamelCase : str = DownloadMode
__lowerCamelCase : Union[str, Any] = DownloadConfig
__lowerCamelCase : List[str] = DownloadMode
__lowerCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 25 | 0 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : Any = {
"""configuration_lxmert""": ["""LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LxmertConfig"""],
"""tokenization_lxmert""": ["""LxmertTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""LxmertTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Optional[Any] = [
"""LxmertEncoder""",
"""LxmertForPreTraining""",
"""LxmertForQuestionAnswering""",
"""LxmertModel""",
"""LxmertPreTrainedModel""",
"""LxmertVisualFeatureEncoder""",
"""LxmertXLayer""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFLxmertForPreTraining""",
"""TFLxmertMainLayer""",
"""TFLxmertModel""",
"""TFLxmertPreTrainedModel""",
"""TFLxmertVisualFeatureEncoder""",
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
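# Sketch (illustrative, PEP 562): _LazyModule above defers the heavy submodule
# imports until a name is first accessed. The same deferral can be written by
# hand with a module-level __getattr__; the names here are hypothetical.
def __getattr__(name):
    if name == "heavy_helper":
        import json  # stand-in for an expensive import
        return json.dumps
    raise AttributeError(name)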
| 700 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : str = [True] * limit
snake_case__ : str = False
snake_case__ : str = False
snake_case__ : str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
snake_case__ : Optional[Any] = i * 2
while index < limit:
snake_case__ : Union[str, Any] = False
snake_case__ : Any = index + i
snake_case__ : Optional[Any] = [2]
for i in range(3 , snake_case_ , 2 ):
if is_prime[i]:
primes.append(snake_case_ )
return primes
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ):
snake_case__ : Optional[int] = prime_sieve(snake_case_ )
snake_case__ : List[Any] = 0
snake_case__ : List[str] = 0
for i in range(len(snake_case_ ) ):
for j in range(i + length , len(snake_case_ ) ):
snake_case__ : Dict = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
snake_case__ : Tuple = j - i
snake_case__ : str = sol
return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 25 | 0 |
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Tuple = {"""vocab_file""": """vocab.txt""", """tokenizer_file""": """tokenizer.json"""}
__lowerCamelCase : int = {
"""vocab_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"""
),
"""distilbert-base-german-cased""": """https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt""",
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"""
),
},
"""tokenizer_file""": {
"""distilbert-base-uncased""": """https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json""",
"""distilbert-base-uncased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-cased""": """https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json""",
"""distilbert-base-cased-distilled-squad""": (
"""https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"""
),
"""distilbert-base-german-cased""": (
"""https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"""
),
"""distilbert-base-multilingual-cased""": (
"""https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"""
),
},
}
__lowerCamelCase : str = {
"""distilbert-base-uncased""": 512,
"""distilbert-base-uncased-distilled-squad""": 512,
"""distilbert-base-cased""": 512,
"""distilbert-base-cased-distilled-squad""": 512,
"""distilbert-base-german-cased""": 512,
"""distilbert-base-multilingual-cased""": 512,
}
__lowerCamelCase : Dict = {
"""distilbert-base-uncased""": {"""do_lower_case""": True},
"""distilbert-base-uncased-distilled-squad""": {"""do_lower_case""": True},
"""distilbert-base-cased""": {"""do_lower_case""": False},
"""distilbert-base-cased-distilled-squad""": {"""do_lower_case""": False},
"""distilbert-base-german-cased""": {"""do_lower_case""": False},
"""distilbert-base-multilingual-cased""": {"""do_lower_case""": False},
}
class SCREAMING_SNAKE_CASE__ ( _UpperCAmelCase ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = ["input_ids", "attention_mask"]
a_ = DistilBertTokenizer
def __init__( self : int , __A : Any=None , __A : Union[str, Any]=None , __A : str=True , __A : Union[str, Any]="[UNK]" , __A : Tuple="[SEP]" , __A : List[Any]="[PAD]" , __A : List[Any]="[CLS]" , __A : Tuple="[MASK]" , __A : str=True , __A : List[str]=None , **__A : Optional[Any] , ):
super().__init__(
lowercase__ , tokenizer_file=lowercase__ , do_lower_case=lowercase__ , unk_token=lowercase__ , sep_token=lowercase__ , pad_token=lowercase__ , cls_token=lowercase__ , mask_token=lowercase__ , tokenize_chinese_chars=lowercase__ , strip_accents=lowercase__ , **lowercase__ , )
snake_case__ : Optional[Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("lowercase" , lowercase__ ) != do_lower_case
or normalizer_state.get("strip_accents" , lowercase__ ) != strip_accents
or normalizer_state.get("handle_chinese_chars" , lowercase__ ) != tokenize_chinese_chars
):
snake_case__ : Dict = getattr(lowercase__ , normalizer_state.pop("type" ) )
snake_case__ : Optional[int] = do_lower_case
snake_case__ : Union[str, Any] = strip_accents
snake_case__ : List[Any] = tokenize_chinese_chars
snake_case__ : int = normalizer_class(**lowercase__ )
snake_case__ : Optional[int] = do_lower_case
def _lowercase ( self : str , __A : Dict , __A : str=None ):
snake_case__ : Any = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
if token_ids_a:
output += token_ids_a + [self.sep_token_id]
return output
def _lowercase ( self : List[Any] , __A : Union[str, Any] , __A : Any = None ):
snake_case__ : Tuple = [self.sep_token_id]
snake_case__ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self : str , __A : Any , __A : str = None ):
snake_case__ : Union[str, Any] = self._tokenizer.model.save(lowercase__ , name=lowercase__ )
return tuple(lowercase__ )
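# Sketch (illustrative) of the segment-id logic in the method above: a single
# sequence is all zeros over [CLS] A [SEP]; a pair appends ones over B [SEP].
def _token_type_ids(len_a: int, len_b: int = 0) -> list:
    ids = [0] * (1 + len_a + 1)  # [CLS] + A + [SEP]
    if len_b:
        ids += [1] * (len_b + 1)  # B + [SEP]
    return ids
assert _token_type_ids(3) == [0] * 5
assert _token_type_ids(3, 2) == [0] * 5 + [1] * 3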
| 701 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : int = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test non-batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test non-batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test non-batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 0 |
# This is the module that test_patching.py uses to test patch_submodule()
import os # noqa: this is just for tests
import os as renamed_os # noqa: this is just for tests
from os import path # noqa: this is just for tests
from os import path as renamed_path # noqa: this is just for tests
from os.path import join # noqa: this is just for tests
from os.path import join as renamed_join # noqa: this is just for tests
__lowerCamelCase : Any = open # noqa: we just need to have a builtin inside this module to test it properly
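# Hedged sketch (not datasets' actual patch_submodule implementation) of what the
# aliases above enable: a patcher can swap each imported name on this module
# object; unittest.mock is used here purely for illustration.
import sys  # noqa: this is just for tests
from unittest.mock import patch  # noqa: this is just for tests

def _demo_patch():
    with patch.object(sys.modules[__name__], "join", lambda *parts: "<patched>"):
        assert join("a", "b") == "<patched>"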
| 702 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case__ : Optional[int] = bs[:]
snake_case__ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
snake_case__ : Dict = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
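# A standalone illustration (not part of the tokenizer) of the byte-to-unicode
# table built above: every byte maps to a printable character, with non-printable
# bytes shifted past 0xFF, so the mapping is one-to-one and reversible.
def _bytes_to_unicode_demo():
    printable = (
        list(range(ord("!"), ord("~") + 1))
        + list(range(ord("¡"), ord("¬") + 1))
        + list(range(ord("®"), ord("ÿ") + 1))
    )
    codepoints, n = printable[:], 0
    for b in range(2**8):
        if b not in printable:
            printable.append(b)
            codepoints.append(2**8 + n)
            n += 1
    table = dict(zip(printable, (chr(c) for c in codepoints)))
    assert len(table) == 256 and len(set(table.values())) == 256  # bijective
    assert table[ord(" ")] == "Ġ"  # the space byte lands past 0xFF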
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
snake_case__ : Dict = set()
snake_case__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : List[Any] = char
return pairs
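# Standalone illustration of the symbol pairs the function above extracts; BPE then
# repeatedly merges the highest-ranked adjacent pair.
_demo_pairs = {(a, b) for a, b in zip("hello", "hello"[1:])}
assert _demo_pairs == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}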
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ):
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behaves like a normal word, i.e. it includes the space before it
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Any = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Union[str, Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Optional[Any] , __A : Optional[int] ):
if token in self.cache:
return self.cache[token]
snake_case__ : Union[str, Any] = tuple(__A )
snake_case__ : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__, snake_case__ : Dict = bigram
snake_case__ : str = []
snake_case__ : Union[str, Any] = 0
while i < len(__A ):
try:
snake_case__ : Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : str = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__A )
snake_case__ : List[Any] = " ".join(__A )
snake_case__ : Optional[int] = word
return word
def _lowercase ( self : Optional[Any] , __A : Optional[Any] ):
snake_case__ : List[str] = []
for token in re.findall(self.pat , __A ):
snake_case__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : Optional[Any] ):
return self.decoder.get(__A )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : Optional[Any] = "".join(__A )
snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : str = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : str = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ):
snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : Optional[int] = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
snake_case__ : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as the other (sequential) inputs.
snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
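# Hedged, standalone sketch of the `global_attention_mask` padding rule implemented
# in `_pad` above: the mask is extended with -1 (local attention) on the padding
# side so it keeps the same length as the padded `input_ids`.
def _pad_global_attention_mask(mask: list, target_length: int, padding_side: str = "right") -> list:
    difference = target_length - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    if padding_side == "left":
        return [-1] * difference + mask
    raise ValueError("Invalid padding strategy:" + padding_side)

assert _pad_global_attention_mask([0, 0, 1], 5) == [0, 0, 1, -1, -1]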
| 25 | 0 |
from typing import Optional
import numpy as np
import torch
from torch import nn
from transformers import GPTaConfig, GPTaLMHeadModel
from transformers.modeling_utils import ModuleUtilsMixin
from ...configuration_utils import ConfigMixin, register_to_config
from ...models import ModelMixin
class SCREAMING_SNAKE_CASE__ ( __A , __A , __A ):
"""simple docstring"""
a_ = [r"h\.\d+\.attn\.bias", r"h\.\d+\.attn\.masked_bias"]
@register_to_config
def __init__( self : Tuple , __A : Tuple , __A : str , __A : Union[str, Any] = None , __A : Union[str, Any] = 5_0_2_5_7 , __A : int = 1_0_2_4 , __A : Optional[int] = 7_6_8 , __A : str = 1_2 , __A : List[Any] = 1_2 , __A : List[Any] = None , __A : Union[str, Any] = "gelu_new" , __A : str = 0.1 , __A : List[str] = 0.1 , __A : Any = 0.1 , __A : Any = 1e-5 , __A : int = 0.0_2 , __A : str = True , __A : int = True , __A : Optional[int] = False , __A : Any = False , ):
super().__init__()
snake_case__ : Tuple = prefix_length
if prefix_inner_dim != n_embd and prefix_hidden_dim is None:
raise ValueError(
f'''`prefix_hidden_dim` cannot be `None` when `prefix_inner_dim`: {prefix_hidden_dim} and'''
f''' `n_embd`: {n_embd} are not equal.''' )
snake_case__ : Optional[Any] = prefix_inner_dim
snake_case__ : Any = prefix_hidden_dim
snake_case__ : Any = (
nn.Linear(self.prefix_inner_dim , self.prefix_hidden_dim )
if self.prefix_hidden_dim is not None
else nn.Identity()
)
snake_case__ : List[Any] = (
nn.Linear(self.prefix_hidden_dim , __A ) if self.prefix_hidden_dim is not None else nn.Identity()
)
snake_case__ : str = GPTaConfig(
vocab_size=__A , n_positions=__A , n_embd=__A , n_layer=__A , n_head=__A , n_inner=__A , activation_function=__A , resid_pdrop=__A , embd_pdrop=__A , attn_pdrop=__A , layer_norm_epsilon=__A , initializer_range=__A , scale_attn_weights=__A , use_cache=__A , scale_attn_by_inverse_layer_idx=__A , reorder_and_upcast_attn=__A , )
snake_case__ : List[Any] = GPTaLMHeadModel(__A )
def _lowercase ( self : Optional[Any] , __A : Union[str, Any] , __A : int , __A : int = None , __A : str = None , ):
snake_case__ : Tuple = self.transformer.transformer.wte(__A )
snake_case__ : Optional[int] = self.encode_prefix(__A )
snake_case__ : str = self.decode_prefix(__A )
snake_case__ : Tuple = torch.cat((prefix_embeds, embedding_text) , dim=1 )
if labels is not None:
snake_case__ : Tuple = self.get_dummy_token(input_ids.shape[0] , input_ids.device )
snake_case__ : Optional[Any] = torch.cat((dummy_token, input_ids) , dim=1 )
snake_case__ : List[str] = self.transformer(inputs_embeds=__A , labels=__A , attention_mask=__A )
if self.prefix_hidden_dim is not None:
return out, hidden
else:
return out
def _lowercase ( self : List[Any] , __A : Optional[Any] , __A : str ):
return torch.zeros(__A , self.prefix_length , dtype=torch.intaa , device=__A )
def _lowercase ( self : Optional[int] , __A : List[str] ):
return self.encode_prefix(__A )
@torch.no_grad()
def _lowercase ( self : Optional[Any] , __A : List[str] , __A : Dict , __A : List[Any] ):
snake_case__ : Any = torch.split(__A , 1 , dim=0 )
snake_case__ : Union[str, Any] = []
snake_case__ : Optional[Any] = []
for feature in features:
snake_case__ : Optional[int] = self.decode_prefix(feature.to(__A ) ) # back to the clip feature
# Only support beam search for now
snake_case__ : Dict = self.generate_beam(
input_embeds=__A , device=__A , eos_token_id=__A )
generated_tokens.append(output_tokens[0] )
generated_seq_lengths.append(seq_lengths[0] )
snake_case__ : Any = torch.stack(__A )
snake_case__ : int = torch.stack(__A )
return generated_tokens, generated_seq_lengths
@torch.no_grad()
def _lowercase ( self : Optional[Any] , __A : List[str]=None , __A : Dict=None , __A : Optional[Any]=None , __A : Union[str, Any] = 5 , __A : List[str] = 6_7 , __A : str = 1.0 , __A : Optional[int] = None , ):
snake_case__ : Optional[Any] = eos_token_id
snake_case__ : Optional[Any] = None
snake_case__ : Dict = None
snake_case__ : Union[str, Any] = torch.ones(__A , device=__A , dtype=torch.int )
snake_case__ : int = torch.zeros(__A , device=__A , dtype=torch.bool )
if input_embeds is not None:
snake_case__ : Optional[Any] = input_embeds
else:
snake_case__ : Dict = self.transformer.transformer.wte(__A )
for i in range(__A ):
snake_case__ : Dict = self.transformer(inputs_embeds=__A )
snake_case__ : Optional[Any] = outputs.logits
snake_case__ : List[Any] = logits[:, -1, :] / (temperature if temperature > 0 else 1.0)
snake_case__ : List[str] = logits.softmax(-1 ).log()
if scores is None:
snake_case__ : int = logits.topk(__A , -1 )
snake_case__ : Any = generated.expand(__A , *generated.shape[1:] )
snake_case__ : Tuple = next_tokens.permute(1 , 0 ), scores.squeeze(0 )
if tokens is None:
snake_case__ : Tuple = next_tokens
else:
snake_case__ : int = tokens.expand(__A , *tokens.shape[1:] )
snake_case__ : List[Any] = torch.cat((tokens, next_tokens) , dim=1 )
else:
snake_case__ : Tuple = -float(np.inf )
snake_case__ : int = 0
snake_case__ : Any = scores[:, None] + logits
seq_lengths[~is_stopped] += 1
snake_case__ : Union[str, Any] = scores_sum / seq_lengths[:, None]
snake_case__ : List[Any] = scores_sum_average.view(-1 ).topk(__A , -1 )
snake_case__ : Optional[Any] = next_tokens // scores_sum.shape[1]
snake_case__ : str = seq_lengths[next_tokens_source]
snake_case__ : List[str] = next_tokens % scores_sum.shape[1]
snake_case__ : Dict = next_tokens.unsqueeze(1 )
snake_case__ : Dict = tokens[next_tokens_source]
snake_case__ : int = torch.cat((tokens, next_tokens) , dim=1 )
snake_case__ : Dict = generated[next_tokens_source]
snake_case__ : Dict = scores_sum_average * seq_lengths
snake_case__ : str = is_stopped[next_tokens_source]
snake_case__ : Optional[Any] = self.transformer.transformer.wte(next_tokens.squeeze() ).view(generated.shape[0] , 1 , -1 )
snake_case__ : Dict = torch.cat((generated, next_token_embed) , dim=1 )
snake_case__ : Optional[int] = is_stopped + next_tokens.eq(__A ).squeeze()
if is_stopped.all():
break
snake_case__ : List[str] = scores / seq_lengths
snake_case__ : List[str] = scores.argsort(descending=__A )
# token tensors are already padded to max_seq_length
snake_case__ : Union[str, Any] = [tokens[i] for i in order]
snake_case__ : Union[str, Any] = torch.stack(__A , dim=0 )
snake_case__ : Any = torch.tensor([seq_lengths[i] for i in order] , dtype=seq_lengths.dtype )
return output_texts, seq_lengths
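# Hedged, standalone sketch of the prefix trick used in `forward` above: projected
# prefix embeddings are concatenated in front of the text-token embeddings, and
# zero "dummy" tokens pad the labels so they align with the prefix positions.
import torch

_prefix_embeds = torch.randn(2, 4, 8)  # (batch, prefix_length, n_embd)
_text_embeds = torch.randn(2, 5, 8)    # (batch, seq_len, n_embd)
_inputs_embeds = torch.cat((_prefix_embeds, _text_embeds), dim=1)
assert _inputs_embeds.shape == (2, 9, 8)
_dummy_tokens = torch.zeros(2, 4, dtype=torch.int64)  # counterpart of get_dummy_token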
| 703 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
| 25 | 0 |
import os
import pytest
from datasets import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
)
__lowerCamelCase : List[Any] = pytest.mark.integration
@pytest.mark.parametrize("path" , ["paws", "csv"] )
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[Any] ):
inspect_dataset(__snake_case , __snake_case )
snake_case__ : int = path + ".py"
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.filterwarnings("ignore:inspect_metric is deprecated:FutureWarning" )
@pytest.mark.filterwarnings("ignore:metric_module_factory is deprecated:FutureWarning" )
@pytest.mark.parametrize("path" , ["accuracy"] )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Dict ):
inspect_metric(__snake_case , __snake_case )
snake_case__ : Union[str, Any] = path + ".py"
assert script_name in os.listdir(__snake_case )
assert "__pycache__" not in os.listdir(__snake_case )
@pytest.mark.parametrize(
"path, config_name, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Optional[int] ):
snake_case__ : Tuple = get_dataset_config_info(__snake_case , config_name=__snake_case )
assert info.config_name == config_name
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : Union[str, Any] ):
with pytest.raises(__snake_case ):
get_dataset_config_info(__snake_case , config_name=__snake_case )
@pytest.mark.parametrize(
"path, expected" , [
("squad", "plain_text"),
("acronym_identification", "default"),
("lhoestq/squad", "plain_text"),
("lhoestq/test", "default"),
("lhoestq/demo1", "lhoestq--demo1"),
("dalle-mini/wit", "dalle-mini--wit"),
] , )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : str ):
snake_case__ : str = get_dataset_config_names(__snake_case )
assert expected in config_names
@pytest.mark.parametrize(
"path, expected_configs, expected_splits_in_first_config" , [
("squad", ["plain_text"], ["train", "validation"]),
("dalle-mini/wit", ["dalle-mini--wit"], ["train"]),
("paws", ["labeled_final", "labeled_swap", "unlabeled_final"], ["train", "test", "validation"]),
] , )
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Optional[int] ):
snake_case__ : Union[str, Any] = get_dataset_infos(__snake_case )
assert list(infos.keys() ) == expected_configs
snake_case__ : Any = expected_configs[0]
assert expected_config in infos
snake_case__ : Optional[int] = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits_in_first_config
@pytest.mark.parametrize(
"path, expected_config, expected_splits" , [
("squad", "plain_text", ["train", "validation"]),
("dalle-mini/wit", "dalle-mini--wit", ["train"]),
("paws", "labeled_final", ["train", "test", "validation"]),
] , )
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : Tuple ):
snake_case__ : Dict = get_dataset_infos(__snake_case )
assert expected_config in infos
snake_case__ : Any = infos[expected_config]
assert info.config_name == expected_config
assert list(info.splits.keys() ) == expected_splits
@pytest.mark.parametrize(
"path, config_name, expected_exception" , [
("paws", None, ValueError),
] , )
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : Union[str, Any] , snake_case_ : Tuple ):
with pytest.raises(__snake_case ):
get_dataset_split_names(__snake_case , config_name=__snake_case )
| 704 |
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : Any = [0] * len(snake_case_ )
for i in range(1 , len(snake_case_ ) ):
# reuse previous results for better performance (dynamic programming)
snake_case__ : Union[str, Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
snake_case__ : str = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
snake_case__ : int = j
return prefix_result
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return max(prefix_function(snake_case_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
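# Standalone worked example of the prefix (KMP failure) function above: each entry
# is the length of the longest proper border of the prefix ending at that index.
def _prefix_function_demo(s: str) -> list:
    pi = [0] * len(s)
    for i in range(1, len(s)):
        j = pi[i - 1]
        while j > 0 and s[i] != s[j]:
            j = pi[j - 1]
        if s[i] == s[j]:
            j += 1
        pi[i] = j
    return pi

assert _prefix_function_demo("aabaaab") == [0, 1, 0, 1, 2, 2, 3]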
| 25 | 0 |
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
__lowerCamelCase : Optional[Any] = {
'''iou_prediction_head.layers.0''': '''iou_prediction_head.proj_in''',
'''iou_prediction_head.layers.1''': '''iou_prediction_head.layers.0''',
'''iou_prediction_head.layers.2''': '''iou_prediction_head.proj_out''',
'''mask_decoder.output_upscaling.0''': '''mask_decoder.upscale_conv1''',
'''mask_decoder.output_upscaling.1''': '''mask_decoder.upscale_layer_norm''',
'''mask_decoder.output_upscaling.3''': '''mask_decoder.upscale_conv2''',
'''mask_downscaling.0''': '''mask_embed.conv1''',
'''mask_downscaling.1''': '''mask_embed.layer_norm1''',
'''mask_downscaling.3''': '''mask_embed.conv2''',
'''mask_downscaling.4''': '''mask_embed.layer_norm2''',
'''mask_downscaling.6''': '''mask_embed.conv3''',
'''point_embeddings''': '''point_embed''',
'''pe_layer.positional_encoding_gaussian_matrix''': '''shared_embedding.positional_embedding''',
'''image_encoder''': '''vision_encoder''',
'''neck.0''': '''neck.conv1''',
'''neck.1''': '''neck.layer_norm1''',
'''neck.2''': '''neck.conv2''',
'''neck.3''': '''neck.layer_norm2''',
'''patch_embed.proj''': '''patch_embed.projection''',
'''.norm''': '''.layer_norm''',
'''blocks''': '''layers''',
}
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : List[Any] = {}
state_dict.pop("pixel_mean" , lowerCAmelCase__ )
state_dict.pop("pixel_std" , lowerCAmelCase__ )
snake_case__ : List[Any] = R".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"
for key, value in state_dict.items():
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case__ : Union[str, Any] = key.replace(lowerCAmelCase__ , lowerCAmelCase__ )
if re.match(lowerCAmelCase__ , lowerCAmelCase__ ):
snake_case__ : Optional[int] = int(re.match(lowerCAmelCase__ , lowerCAmelCase__ ).group(2 ) )
if layer_nb == 0:
snake_case__ : Union[str, Any] = key.replace("layers.0" , "proj_in" )
elif layer_nb == 1:
snake_case__ : str = key.replace("layers.1" , "layers.0" )
elif layer_nb == 2:
snake_case__ : List[str] = key.replace("layers.2" , "proj_out" )
snake_case__ : Any = value
snake_case__ : str = model_state_dict[
"prompt_encoder.shared_embedding.positional_embedding"
]
return model_state_dict
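# Hedged, standalone sketch of the renaming rule above (it assumes the module-level
# mapping referenced in the body as KEYS_TO_MODIFY_MAPPING): substring replacements
# first, then hypernetwork-MLP layer indices 0/1/2 remapped to proj_in / layers.0 /
# proj_out.
def _rename_key_demo(key: str) -> str:
    for old, new in KEYS_TO_MODIFY_MAPPING.items():
        if old in key:
            key = key.replace(old, new)
    match = re.match(r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*", key)
    if match:
        layer_nb = int(match.group(2))
        key = key.replace(f"layers.{layer_nb}", {0: "proj_in", 1: "layers.0", 2: "proj_out"}[layer_nb])
    return key

assert _rename_key_demo("mask_decoder.output_hypernetworks_mlps.3.layers.2.weight").endswith("proj_out.weight")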
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Tuple , snake_case_ : List[str] , snake_case_ : Dict="ybelkada/segment-anything" ):
snake_case__ : Dict = hf_hub_download(lowerCAmelCase__ , F'''checkpoints/{model_name}.pth''' )
if "sam_vit_b" in model_name:
snake_case__ : Tuple = SamConfig()
elif "sam_vit_l" in model_name:
snake_case__ : List[Any] = SamVisionConfig(
hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
snake_case__ : int = SamConfig(
vision_config=lowerCAmelCase__ , )
elif "sam_vit_h" in model_name:
snake_case__ : List[Any] = SamVisionConfig(
hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
snake_case__ : Optional[int] = SamConfig(
vision_config=lowerCAmelCase__ , )
snake_case__ : Optional[int] = torch.load(lowerCAmelCase__ , map_location="cpu" )
snake_case__ : Union[str, Any] = replace_keys(lowerCAmelCase__ )
snake_case__ : Dict = SamImageProcessor()
snake_case__ : List[str] = SamProcessor(image_processor=lowerCAmelCase__ )
snake_case__ : Optional[int] = SamModel(lowerCAmelCase__ )
hf_model.load_state_dict(lowerCAmelCase__ )
snake_case__ : Optional[Any] = hf_model.to("cuda" )
snake_case__ : int = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
snake_case__ : int = Image.open(requests.get(lowerCAmelCase__ , stream=lowerCAmelCase__ ).raw ).convert("RGB" )
snake_case__ : List[Any] = [[[400, 650]]]
snake_case__ : str = [[1]]
snake_case__ : Union[str, Any] = processor(images=np.array(lowerCAmelCase__ ) , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ : Any = hf_model(**lowerCAmelCase__ )
snake_case__ : Optional[Any] = output.iou_scores.squeeze()
if model_name == "sam_vit_h_4b8939":
assert scores[-1].item() == 0.5_79_89_02_51_15_96_68
snake_case__ : List[str] = processor(
images=np.array(lowerCAmelCase__ ) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ : int = hf_model(**lowerCAmelCase__ )
snake_case__ : List[str] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.97_12_60_30_92_19_36_04
snake_case__ : List[Any] = ((75, 275, 1725, 850),)
snake_case__ : Dict = processor(images=np.array(lowerCAmelCase__ ) , input_boxes=lowerCAmelCase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ : Dict = hf_model(**lowerCAmelCase__ )
snake_case__ : Optional[int] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.86_86_01_56_05_92_65_14
# Test with 2 points and 1 image.
snake_case__ : Any = [[[400, 650], [800, 650]]]
snake_case__ : Any = [[1, 1]]
snake_case__ : Any = processor(
images=np.array(lowerCAmelCase__ ) , input_points=lowerCAmelCase__ , input_labels=lowerCAmelCase__ , return_tensors="pt" ).to("cuda" )
with torch.no_grad():
snake_case__ : List[Any] = hf_model(**lowerCAmelCase__ )
snake_case__ : Union[str, Any] = output.iou_scores.squeeze()
assert scores[-1].item() == 0.99_36_04_77_92_43_46_92
if __name__ == "__main__":
__lowerCamelCase : List[str] = argparse.ArgumentParser()
__lowerCamelCase : List[Any] = ['''sam_vit_b_01ec64''', '''sam_vit_h_4b8939''', '''sam_vit_l_0b3195''']
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
__lowerCamelCase : int = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 705 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# use a global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def _lowercase ( self : int , __A : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ : Optional[int] = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
snake_case__ : Any = {"dtype": jnp.intaa}
else:
snake_case__ : Tuple = {"dtype": jnp.intaa}
elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case__ : str = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case__ : Optional[Any] = np.asarray(__A )
# use a global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
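# Hedged, standalone sketch of the default-dtype rule used in `_tensorize` above:
# integer arrays follow JAX's x64 flag, floating arrays default to float32.
import jax
import jax.numpy as jnp
import numpy as np

def _default_dtype_demo(value: np.ndarray) -> dict:
    if np.issubdtype(value.dtype, np.integer):
        return {"dtype": jnp.int64 if jax.config.jax_enable_x64 else jnp.int32}
    if np.issubdtype(value.dtype, np.floating):
        return {"dtype": jnp.float32}
    return {}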
| 25 | 0 |
from typing import Any
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : str , snake_case_ : Any , snake_case_ : Union[str, Any] , snake_case_ : List[str] , ):
_validation(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
# Create the data structures and fill in the initial step
snake_case__ : dict = {}
snake_case__ : dict = {}
for state in states_space:
snake_case__ : Union[str, Any] = observations_space[0]
snake_case__ : List[Any] = (
initial_probabilities[state] * emission_probabilities[state][observation]
)
snake_case__ : Any = None
# Fills the data structure with the probabilities of
# different transitions and pointers to previous states
for o in range(1 , len(lowerCAmelCase_ ) ):
snake_case__ : Optional[Any] = observations_space[o]
snake_case__ : Tuple = observations_space[o - 1]
for state in states_space:
# Calculate the argmax of the probability function
snake_case__ : List[str] = ""
snake_case__ : Dict = -1
for k_state in states_space:
snake_case__ : List[str] = (
probabilities[(k_state, prior_observation)]
* transition_probabilities[k_state][state]
* emission_probabilities[state][observation]
)
if probability > max_probability:
snake_case__ : List[Any] = probability
snake_case__ : List[str] = k_state
# Update probabilities and pointers dicts
snake_case__ : Union[str, Any] = (
probabilities[(arg_max, prior_observation)]
* transition_probabilities[arg_max][state]
* emission_probabilities[state][observation]
)
snake_case__ : Optional[Any] = arg_max
# The final observation
snake_case__ : Dict = observations_space[len(lowerCAmelCase_ ) - 1]
# argmax for given final observation
snake_case__ : Optional[Any] = ""
snake_case__ : Tuple = -1
for k_state in states_space:
snake_case__ : Optional[Any] = probabilities[(k_state, final_observation)]
if probability > max_probability:
snake_case__ : Optional[Any] = probability
snake_case__ : Tuple = k_state
snake_case__ : List[str] = arg_max
# Process pointers backwards
snake_case__ : Union[str, Any] = last_state
snake_case__ : Optional[Any] = []
for o in range(len(lowerCAmelCase_ ) - 1 , -1 , -1 ):
result.append(lowerCAmelCase_ )
snake_case__ : Optional[Any] = pointers[previous, observations_space[o]]
result.reverse()
return result
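# Standalone worked example for the algorithm above, using the classic two-state
# weather HMM. Calling the function (shown here under the hypothetical name
# `viterbi`) on these inputs yields ["Sunny", "Rainy", "Rainy"].
_states = ["Rainy", "Sunny"]
_observations = ["walk", "shop", "clean"]
_initial_probabilities = {"Rainy": 0.6, "Sunny": 0.4}
_transition_probabilities = {
    "Rainy": {"Rainy": 0.7, "Sunny": 0.3},
    "Sunny": {"Rainy": 0.4, "Sunny": 0.6},
}
_emission_probabilities = {
    "Rainy": {"walk": 0.1, "shop": 0.4, "clean": 0.5},
    "Sunny": {"walk": 0.6, "shop": 0.3, "clean": 0.1},
}
# viterbi(_observations, _states, _initial_probabilities,
#         _transition_probabilities, _emission_probabilities)
# -> ["Sunny", "Rainy", "Rainy"]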
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : str , snake_case_ : int , snake_case_ : List[str] , ):
_validate_not_empty(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , )
_validate_lists(lowerCAmelCase_ , lowerCAmelCase_ )
_validate_dicts(
lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict , snake_case_ : Optional[int] , snake_case_ : Union[str, Any] , snake_case_ : Tuple , snake_case_ : Any , ):
if not all(
[
observations_space,
states_space,
initial_probabilities,
transition_probabilities,
emission_probabilities,
] ):
raise ValueError("There's an empty parameter" )
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Optional[Any] ):
_validate_list(lowerCAmelCase_ , "observations_space" )
_validate_list(lowerCAmelCase_ , "states_space" )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Dict ):
if not isinstance(_object , lowerCAmelCase_ ):
snake_case__ : Union[str, Any] = F'''{var_name} must be a list'''
raise ValueError(lowerCAmelCase_ )
else:
for x in _object:
if not isinstance(lowerCAmelCase_ , lowerCAmelCase_ ):
snake_case__ : List[Any] = F'''{var_name} must be a list of strings'''
raise ValueError(lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Any , snake_case_ : Dict , ):
_validate_dict(lowerCAmelCase_ , "initial_probabilities" , lowerCAmelCase_ )
_validate_nested_dict(lowerCAmelCase_ , "transition_probabilities" )
_validate_nested_dict(lowerCAmelCase_ , "emission_probabilities" )
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] ):
_validate_dict(_object , lowerCAmelCase_ , lowerCAmelCase_ )
for x in _object.values():
_validate_dict(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : str , snake_case_ : Union[str, Any] , snake_case_ : Optional[Any] = False ):
if not isinstance(_object , lowerCAmelCase_ ):
snake_case__ : List[str] = F'''{var_name} must be a dict'''
raise ValueError(lowerCAmelCase_ )
if not all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for x in _object ):
snake_case__ : List[str] = F'''{var_name} all keys must be strings'''
raise ValueError(lowerCAmelCase_ )
if not all(isinstance(lowerCAmelCase_ , lowerCAmelCase_ ) for x in _object.values() ):
snake_case__ : Union[str, Any] = "nested dictionary " if nested else ""
snake_case__ : str = F'''{var_name} {nested_text}all values must be {value_type.__name__}'''
raise ValueError(lowerCAmelCase_ )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 706 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
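# Hedged sketch (not transformers' actual _LazyModule) of the deferred-import
# pattern above, using PEP 562 module-level __getattr__: a heavy submodule is
# imported only on first attribute access.
import importlib

_LAZY_ATTRS = {"RobertaPreLayerNormConfig": ".configuration_roberta_prelayernorm"}

def __getattr__(name):
    if name in _LAZY_ATTRS:
        module = importlib.import_module(_LAZY_ATTRS[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")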
| 25 | 0 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""facebook/data2vec-vision-base-ft""": (
"""https://huggingface.co/facebook/data2vec-vision-base-ft/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "data2vec-vision"
def __init__( self : Any , __A : Optional[Any]=7_6_8 , __A : Dict=1_2 , __A : List[Any]=1_2 , __A : Optional[int]=3_0_7_2 , __A : Tuple="gelu" , __A : Tuple=0.0 , __A : int=0.0 , __A : Union[str, Any]=0.0_2 , __A : Any=1e-1_2 , __A : Dict=2_2_4 , __A : Any=1_6 , __A : List[str]=3 , __A : int=False , __A : Union[str, Any]=False , __A : Union[str, Any]=False , __A : Tuple=False , __A : Optional[int]=0.1 , __A : Dict=0.1 , __A : Any=True , __A : Dict=[3, 5, 7, 1_1] , __A : Any=[1, 2, 3, 6] , __A : Tuple=True , __A : Dict=0.4 , __A : List[str]=2_5_6 , __A : Any=1 , __A : List[str]=False , __A : Optional[int]=2_5_5 , **__A : Dict , ):
super().__init__(**__A )
snake_case__ : str = hidden_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : Dict = initializer_range
snake_case__ : List[Any] = layer_norm_eps
snake_case__ : Union[str, Any] = image_size
snake_case__ : str = patch_size
snake_case__ : Tuple = num_channels
snake_case__ : Optional[int] = use_mask_token
snake_case__ : List[str] = use_absolute_position_embeddings
snake_case__ : Any = use_relative_position_bias
snake_case__ : Optional[Any] = use_shared_relative_position_bias
snake_case__ : Optional[Any] = layer_scale_init_value
snake_case__ : str = drop_path_rate
snake_case__ : Optional[int] = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ : Optional[Any] = out_indices
snake_case__ : List[str] = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ : List[str] = use_auxiliary_head
snake_case__ : List[Any] = auxiliary_loss_weight
snake_case__ : Dict = auxiliary_channels
snake_case__ : int = auxiliary_num_convs
snake_case__ : List[str] = auxiliary_concat_input
snake_case__ : str = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = version.parse("1.11" )
@property
def _lowercase ( self : Union[str, Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowercase ( self : Optional[Any] ):
return 1e-4
| 707 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Tuple = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def _lowercase ( self : Dict ):
snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation("gelu" )
snake_case__ : int = get_activation("gelu_10" )
snake_case__ : Optional[int] = torch_builtin(__A )
snake_case__ : Dict = geluaa(__A )
snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _lowercase ( self : str ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__A ):
get_activation("bogus" )
with self.assertRaises(__A ):
get_activation(__A )
def _lowercase ( self : List[str] ):
snake_case__ : List[str] = get_activation("gelu" )
snake_case__ : Any = 1
snake_case__ : Union[str, Any] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__A ):
snake_case__ : int = acta.a
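# Hedged, standalone sketch of what the first test above distinguishes: the exact
# erf-based GELU versus the tanh approximation ("gelu_new"); the two track each
# other closely but are not identical, hence allclose for one and not the other.
import math
import torch

def _gelu_exact(x):
    return x * 0.5 * (1.0 + torch.erf(x / math.sqrt(2.0)))

def _gelu_tanh(x):
    return 0.5 * x * (1.0 + torch.tanh(math.sqrt(2.0 / math.pi) * (x + 0.044715 * torch.pow(x, 3.0))))

_x = torch.linspace(-3.0, 3.0, steps=7)
assert torch.allclose(_gelu_exact(_x), _gelu_tanh(_x), atol=1e-2)
assert not torch.allclose(_gelu_exact(_x), _gelu_tanh(_x), atol=1e-6)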
| 25 | 0 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Dict = {'configuration_sew': ['SEW_PRETRAINED_CONFIG_ARCHIVE_MAP', 'SEWConfig']}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
'SEW_PRETRAINED_MODEL_ARCHIVE_LIST',
'SEWForCTC',
'SEWForSequenceClassification',
'SEWModel',
'SEWPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_sew import SEW_PRETRAINED_CONFIG_ARCHIVE_MAP, SEWConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_sew import (
SEW_PRETRAINED_MODEL_ARCHIVE_LIST,
SEWForCTC,
SEWForSequenceClassification,
SEWModel,
SEWPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Any = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
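# Added sketch: with _LazyModule, modeling_sew is not imported when the package
# is imported; the first attribute access resolves it. A minimal illustration
# (assumes transformers with torch installed):
def _demo_lazy_access():
    from transformers.models import sew  # module object built by _LazyModule
    return sew.SEWConfig()  # attribute access triggers the real import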
| 708 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
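# Added sketch: the heart of the copy above is a getattr walk over a dotted
# attribute path. A standalone helper (the name is illustrative, not part of
# the script):
def _get_by_path(root, dotted_path):
    obj = root
    for attr in dotted_path.split("."):
        obj = getattr(obj, attr)
    return obj
# e.g. _get_by_path(hf_model, "unispeech_sat.encoder.layers.0.attention.k_proj").weight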
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
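# Example invocation (sketch; the script name and all paths are placeholders):
# python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path /path/to/fairseq_checkpoint.pt \
#     --dict_path /path/to/dict \
#     --pytorch_dump_folder_path ./unispeech-sat-hf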
| 25 | 0 |
import numpy as np
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[str] ):
# Exponential Linear Unit (ELU): x for x > 0, otherwise alpha * (exp(x) - 1)
return np.where(vector > 0 , A_ , (alpha * (np.exp(A_ ) - 1)) )
if __name__ == "__main__":
import doctest
doctest.testmod()
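# Added sketch: a self-contained ELU for reference (the function above keeps
# this dump's obfuscated names, so this restates it with readable ones):
def _elu(vector, alpha=1.0):
    return np.where(vector > 0, vector, alpha * (np.exp(vector) - 1))
# _elu(np.array([-2.0, 0.0, 3.0])) -> array([-0.8646...,  0.,  3.])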
| 709 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn puts
# position_ids off by num_pad_tokens in the past input (see the clamp sketch after this method)
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
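# Added sketch: what the clamp above guarantees. With pad_token_id = 1 (the
# tester default), clamping to pad_token_id + 1 replaces any interior pad
# token, so position ids stay consistent with and without past key values:
# >>> torch.tensor([[5, 1, 7, 3]]).clamp(1 + 1)
# tensor([[5, 2, 7, 3]])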
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
snake_case__, snake_case__ : Dict = outputs.to_tuple()
# create hypothetical multiple next tokens and extend to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append next tokens to input_ids and next_attn_mask to attention_mask
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
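# Added sketch: the cache equivalence asserted above, in pseudo-shapes. Feeding
# only the 3 new tokens plus past_key_values must match recomputing the full
# sequence from scratch (illustrative names, not runnable against a checkpoint):
# full = decoder(torch.cat([ids, new_tokens], dim=-1))["last_hidden_state"]  # (B, T+3, H)
# step = decoder(new_tokens, past_key_values=past)["last_hidden_state"]      # (B, 3, H)
# torch.allclose(full[:, -3:], step, atol=1e-2)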
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de dรฉbat sur le renseignement",
"Selon moi, il y a deux niveaux de rรฉponse de la part du gouvernement franรงais.",
"Lorsque Franรงois Hollande tรฉlรฉphone ร Barack Obama ou quand le ministre des affaires รฉtrangรจres Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils rรฉagissent ร une vraie dรฉcouverte, qui est celle de"
" l'ampleur de la surveillance amรฉricaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When Franรงois Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 25 | 0 |
import inspect
import unittest
from transformers import MobileViTVaConfig
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation, MobileViTVaModel
from transformers.models.mobilevitva.modeling_mobilevitva import (
MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST,
make_divisible,
)
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class SCREAMING_SNAKE_CASE__ ( snake_case__ ):
"""simple docstring"""
def _lowercase ( self : str ):
snake_case__ : List[str] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(_A , "width_multiplier" ) )
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Optional[int] , __A : List[Any] , __A : List[Any]=1_3 , __A : str=6_4 , __A : Dict=2 , __A : Tuple=3 , __A : Optional[Any]="swish" , __A : Optional[Any]=3 , __A : Optional[int]=3_2 , __A : int=0.1 , __A : str=0.0_2 , __A : str=True , __A : Any=True , __A : Any=1_0 , __A : Any=None , __A : Optional[int]=0.2_5 , __A : int=0.0 , __A : int=0.0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : Dict = batch_size
snake_case__ : Union[str, Any] = image_size
snake_case__ : Dict = patch_size
snake_case__ : Tuple = num_channels
snake_case__ : List[Any] = make_divisible(5_1_2 * width_multiplier , divisor=8 )
snake_case__ : List[str] = hidden_act
snake_case__ : Union[str, Any] = conv_kernel_size
snake_case__ : Union[str, Any] = output_stride
snake_case__ : Union[str, Any] = classifier_dropout_prob
snake_case__ : str = use_labels
snake_case__ : Optional[int] = is_training
snake_case__ : Any = num_labels
snake_case__ : Any = initializer_range
snake_case__ : Tuple = scope
snake_case__ : Optional[Any] = width_multiplier
snake_case__ : str = ffn_dropout
snake_case__ : Dict = attn_dropout
def _lowercase ( self : Union[str, Any] ):
snake_case__ : List[str] = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Union[str, Any] = None
snake_case__ : List[str] = None
if self.use_labels:
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Any = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
snake_case__ : Any = self.get_config()
return config, pixel_values, labels, pixel_labels
def _lowercase ( self : str ):
return MobileViTVaConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_act=self.hidden_act , conv_kernel_size=self.conv_kernel_size , output_stride=self.output_stride , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , width_multiplier=self.width_multiplier , ffn_dropout=self.ffn_dropout_prob , attn_dropout=self.attn_dropout_prob , )
def _lowercase ( self : int , __A : Tuple , __A : Union[str, Any] , __A : Any , __A : Any ):
snake_case__ : Union[str, Any] = MobileViTVaModel(config=_A )
model.to(_A )
model.eval()
snake_case__ : Any = model(_A )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowercase ( self : Union[str, Any] , __A : Optional[int] , __A : Optional[Any] , __A : Dict , __A : int ):
snake_case__ : List[Any] = self.num_labels
snake_case__ : Union[str, Any] = MobileViTVaForImageClassification(_A )
model.to(_A )
model.eval()
snake_case__ : List[str] = model(_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : List[Any] , __A : Tuple , __A : Any , __A : str , __A : List[Any] ):
snake_case__ : Dict = self.num_labels
snake_case__ : Dict = MobileViTVaForSemanticSegmentation(_A )
model.to(_A )
model.eval()
snake_case__ : Tuple = model(_A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
snake_case__ : Optional[int] = model(_A , labels=_A )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
def _lowercase ( self : List[Any] ):
snake_case__ : Optional[Any] = self.prepare_config_and_inputs()
snake_case__ : Optional[int] = config_and_inputs
snake_case__ : List[Any] = {'pixel_values': pixel_values}
return config, inputs_dict
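# Added sketch: make_divisible, used in the tester above to derive
# last_hidden_size, follows the MobileNet rounding rule -- round to the nearest
# multiple of `divisor` without dropping below 90% of the requested width.
# A local illustration, not the imported function:
def _make_divisible_sketch(value, divisor=8, min_value=None):
    if min_value is None:
        min_value = divisor
    new_value = max(min_value, int(value + divisor / 2) // divisor * divisor)
    if new_value < 0.9 * value:
        new_value += divisor
    return new_value
# _make_divisible_sketch(512 * 0.25) -> 128 (matches the tester default width_multiplier)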
@require_torch
class SCREAMING_SNAKE_CASE__ ( snake_case__ , snake_case__ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(MobileViTVaModel, MobileViTVaForImageClassification, MobileViTVaForSemanticSegmentation)
if is_torch_available()
else ()
)
a_ = (
{
'''feature-extraction''': MobileViTVaModel,
'''image-classification''': MobileViTVaForImageClassification,
'''image-segmentation''': MobileViTVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = MobileViTVaModelTester(self )
snake_case__ : int = MobileViTVaConfigTester(self , config_class=_A , has_text_modality=_A )
def _lowercase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="MobileViTV2 does not use inputs_embeds" )
def _lowercase ( self : Optional[Any] ):
pass
@unittest.skip(reason="MobileViTV2 does not support input and output embeddings" )
def _lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason="MobileViTV2 does not output attentions" )
def _lowercase ( self : int ):
pass
@require_torch_multi_gpu
@unittest.skip(reason="Got `CUDA error: misaligned address` for tests after this one being run." )
def _lowercase ( self : str ):
pass
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : List[str] ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(_A )
snake_case__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Tuple = [*signature.parameters.keys()]
snake_case__ : Tuple = ['pixel_values']
self.assertListEqual(arg_names[:1] , _A )
def _lowercase ( self : List[Any] ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def _lowercase ( self : List[str] ):
def check_hidden_states_output(__A : Optional[Any] , __A : Optional[Any] , __A : Dict ):
snake_case__ : Optional[int] = model_class(_A )
model.to(_A )
model.eval()
with torch.no_grad():
snake_case__ : Dict = model(**self._prepare_for_class(_A , _A ) )
snake_case__ : Optional[int] = outputs.hidden_states
snake_case__ : Any = 5
self.assertEqual(len(_A ) , _A )
# MobileViTV2's feature maps are of shape (batch_size, num_channels, height, width)
# with the width and height being successively divided by 2 (see the sketch after this method).
snake_case__ : Optional[Any] = 2
for i in range(len(_A ) ):
self.assertListEqual(
list(hidden_states[i].shape[-2:] ) , [self.model_tester.image_size // divisor, self.model_tester.image_size // divisor] , )
divisor *= 2
self.assertEqual(self.model_tester.output_stride , divisor // 2 )
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : List[str] = True
check_hidden_states_output(_A , _A , _A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : Optional[Any] = True
check_hidden_states_output(_A , _A , _A )
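# Added sketch: the halving pattern verified above. With the tester default
# image_size = 64 and five hidden states, the spatial sizes are
# [64 // 2, 64 // 4, 64 // 8, 64 // 16, 64 // 32] == [32, 16, 8, 4, 2],
# and the final divisor // 2 recovers output_stride = 32.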
def _lowercase ( self : str ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*_A )
def _lowercase ( self : Optional[int] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*_A )
@slow
def _lowercase ( self : Optional[int] ):
for model_name in MOBILEVITV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = MobileViTVaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return (
MobileViTImageProcessor.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : List[str] ):
snake_case__ : Dict = MobileViTVaForImageClassification.from_pretrained("apple/mobilevitv2-1.0-imagenet1k-256" ).to(
_A )
snake_case__ : Tuple = self.default_image_processor
snake_case__ : Union[str, Any] = prepare_img()
snake_case__ : int = image_processor(images=_A , return_tensors="pt" ).to(_A )
# forward pass
with torch.no_grad():
snake_case__ : str = model(**_A )
# verify the logits
snake_case__ : Dict = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , _A )
snake_case__ : Dict = torch.tensor([-1.6_3_3_6e0_0, -7.3_2_0_4e-0_2, -5.1_8_8_3e-0_1] ).to(_A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , _A , atol=1e-4 ) )
@slow
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case__ : Union[str, Any] = model.to(_A )
snake_case__ : int = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case__ : Union[str, Any] = prepare_img()
snake_case__ : Dict = image_processor(images=_A , return_tensors="pt" ).to(_A )
# forward pass
with torch.no_grad():
snake_case__ : int = model(**_A )
snake_case__ : Optional[int] = outputs.logits
# verify the logits
snake_case__ : List[str] = torch.Size((1, 2_1, 3_2, 3_2) )
self.assertEqual(logits.shape , _A )
snake_case__ : Dict = torch.tensor(
[
[[7.0_8_6_3, 7.1_5_2_5, 6.8_2_0_1], [6.6_9_3_1, 6.8_7_7_0, 6.8_9_3_3], [6.2_9_7_8, 7.0_3_6_6, 6.9_6_3_6]],
[[-3.7_1_3_4, -3.6_7_1_2, -3.6_6_7_5], [-3.5_8_2_5, -3.3_5_4_9, -3.4_7_7_7], [-3.3_4_3_5, -3.3_9_7_9, -3.2_8_5_7]],
[[-2.9_3_2_9, -2.8_0_0_3, -2.7_3_6_9], [-3.0_5_6_4, -2.4_7_8_0, -2.0_2_0_7], [-2.6_8_8_9, -1.9_2_9_8, -1.7_6_4_0]],
] , device=_A , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , _A , atol=1e-4 ) )
@slow
def _lowercase ( self : int ):
snake_case__ : Tuple = MobileViTVaForSemanticSegmentation.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case__ : str = model.to(_A )
snake_case__ : Union[str, Any] = MobileViTImageProcessor.from_pretrained("shehan97/mobilevitv2-1.0-voc-deeplabv3" )
snake_case__ : Union[str, Any] = prepare_img()
snake_case__ : List[Any] = image_processor(images=_A , return_tensors="pt" ).to(_A )
# forward pass
with torch.no_grad():
snake_case__ : str = model(**_A )
snake_case__ : Dict = outputs.logits.detach().cpu()
snake_case__ : Optional[int] = image_processor.post_process_semantic_segmentation(outputs=_A , target_sizes=[(5_0, 6_0)] )
snake_case__ : Dict = torch.Size((5_0, 6_0) )
self.assertEqual(segmentation[0].shape , _A )
snake_case__ : List[str] = image_processor.post_process_semantic_segmentation(outputs=_A )
snake_case__ : str = torch.Size((3_2, 3_2) )
self.assertEqual(segmentation[0].shape , _A )
| 710 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : Optional[int] = []
for part_id in partition_order:
snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 )
snake_case__ : Any = Spark(snake_case_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
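# Added sketch: the shard arithmetic behind the assertion above. Int64 rows are
# 8 bytes each, so max_shard_size = 16 holds exactly 2 rows per partition:
_rows, _row_bytes, _max_shard_size = 100, 8, 16
assert _rows * _row_bytes // _max_shard_size == 50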
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 )
snake_case__ : Optional[Any] = [1, 0]
snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions.
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case__, snake_case__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[int] = spark.range(10 ).repartition(1 )
snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
snake_case__ : Union[str, Any] = lambda snake_case_ : x.reverse()
snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(100 ).repartition(1 )
snake_case__ : Union[str, Any] = Spark(snake_case_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 25 | 0 |
import argparse
import hashlib # hashlib is only used in the self-test below
import struct
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Any , __A : List[str] ):
snake_case__ : List[Any] = data
snake_case__ : Tuple = [0x67452301, 0xEFCDAB89, 0x98BADCFE, 0x10325476, 0xC3D2E1F0]
@staticmethod
def _lowercase ( __A : str , __A : Union[str, Any] ):
return ((n << b) | (n >> (3_2 - b))) & 0xFFFFFFFF
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = B"\x80" + B"\x00" * (6_3 - (len(self.data ) + 8) % 6_4)
snake_case__ : Union[str, Any] = self.data + padding + struct.pack(">Q" , 8 * len(self.data ) )
return padded_data
def _lowercase ( self : Union[str, Any] ):
return [
self.padded_data[i : i + 6_4] for i in range(0 , len(self.padded_data ) , 6_4 )
]
def _lowercase ( self : Union[str, Any] , __A : str ):
snake_case__ : Dict = list(struct.unpack(">16L" , __a ) ) + [0] * 6_4
for i in range(1_6 , 8_0 ):
snake_case__ : Dict = self.rotate((w[i - 3] ^ w[i - 8] ^ w[i - 1_4] ^ w[i - 1_6]) , 1 )
return w
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.padding()
snake_case__ : List[Any] = self.split_blocks()
for block in self.blocks:
snake_case__ : Optional[Any] = self.expand_block(__a )
snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ : Tuple = self.h
for i in range(0 , 8_0 ):
if 0 <= i < 2_0:
snake_case__ : Any = (b & c) | ((~b) & d)
snake_case__ : Optional[Any] = 0x5A827999
elif 2_0 <= i < 4_0:
snake_case__ : Optional[int] = b ^ c ^ d
snake_case__ : Dict = 0x6ED9EBA1
elif 4_0 <= i < 6_0:
snake_case__ : List[str] = (b & c) | (b & d) | (c & d)
snake_case__ : Tuple = 0x8F1BBCDC
elif 6_0 <= i < 8_0:
snake_case__ : Union[str, Any] = b ^ c ^ d
snake_case__ : Tuple = 0xCA62C1D6
snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ : Dict = (
self.rotate(__a , 5 ) + f + e + k + expanded_block[i] & 0xFFFFFFFF,
a,
self.rotate(__a , 3_0 ),
c,
d,
)
snake_case__ : List[Any] = (
self.h[0] + a & 0xFFFFFFFF,
self.h[1] + b & 0xFFFFFFFF,
self.h[2] + c & 0xFFFFFFFF,
self.h[3] + d & 0xFFFFFFFF,
self.h[4] + e & 0xFFFFFFFF,
)
return ("{:08x}" * 5).format(*self.h )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Union[str, Any] = b"Test String"
assert SHAaHash(__snake_case ).final_hash() == hashlib.shaa(__snake_case ).hexdigest() # noqa: S324
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : List[str] = argparse.ArgumentParser(description="Process some strings or files" )
parser.add_argument(
"--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument("--file" , dest="input_file" , help="Hash contents of a file" )
snake_case__ : List[Any] = parser.parse_args()
snake_case__ : Dict = args.input_string
# In any case hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
snake_case__ : Tuple = f.read()
else:
snake_case__ : Any = bytes(__snake_case , "utf-8" )
print(SHAaHash(__snake_case ).final_hash() )
if __name__ == "__main__":
main()
import doctest
doctest.testmod()
| 711 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 0 |
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Optional[Any] ):
snake_case__ : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
snake_case__ : Union[str, Any] = Image.open(requests.get(lowerCamelCase_ , stream=lowerCamelCase_ ).raw ).convert("RGB" )
snake_case__ : Tuple = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73) , (0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11) ),
] )
snake_case__ : Dict = transform(lowerCamelCase_ ).unsqueeze(0 ).to(lowerCamelCase_ )
return image
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
if "visual_encoder" in key:
snake_case__ : Any = re.sub("visual_encoder*" , "vision_model.encoder" , lowerCamelCase_ )
if "blocks" in key:
snake_case__ : Union[str, Any] = re.sub(R"blocks" , "layers" , lowerCamelCase_ )
if "attn" in key:
snake_case__ : Dict = re.sub(R"attn" , "self_attn" , lowerCamelCase_ )
if "norm1" in key:
snake_case__ : str = re.sub(R"norm1" , "layer_norm1" , lowerCamelCase_ )
if "norm2" in key:
snake_case__ : Tuple = re.sub(R"norm2" , "layer_norm2" , lowerCamelCase_ )
if "encoder.norm" in key:
snake_case__ : Dict = re.sub(R"encoder.norm" , "post_layernorm" , lowerCamelCase_ )
if "encoder.patch_embed.proj" in key:
snake_case__ : str = re.sub(R"encoder.patch_embed.proj" , "embeddings.patch_embedding" , lowerCamelCase_ )
if "encoder.pos_embed" in key:
snake_case__ : Dict = re.sub(R"encoder.pos_embed" , "embeddings.position_embedding" , lowerCamelCase_ )
if "encoder.cls_token" in key:
snake_case__ : Optional[Any] = re.sub(R"encoder.cls_token" , "embeddings.class_embedding" , lowerCamelCase_ )
if "self_attn" in key:
snake_case__ : Union[str, Any] = re.sub(R"self_attn.proj" , "self_attn.projection" , lowerCamelCase_ )
return key
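# Added sketch: one concrete rename under the rules above (the input key is
# illustrative), tracing the substitutions in order:
# "visual_encoder.blocks.0.attn.qkv.weight"
#   -> "vision_model.encoder.blocks.0.attn.qkv.weight"       (visual_encoder rule)
#   -> "vision_model.encoder.layers.0.attn.qkv.weight"       (blocks -> layers)
#   -> "vision_model.encoder.layers.0.self_attn.qkv.weight"  (attn -> self_attn)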
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : Dict=None ):
if config_path is not None:
snake_case__ : List[Any] = BlipConfig.from_pretrained(lowerCamelCase_ )
else:
snake_case__ : Tuple = BlipConfig(projection_dim=512 , text_config={} , vision_config={} )
snake_case__ : Dict = BlipForConditionalGeneration(lowerCamelCase_ ).eval()
snake_case__ : List[Any] = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
snake_case__ : Dict = blip_decoder(pretrained=lowerCamelCase_ , image_size=384 , vit="base" )
snake_case__ : Optional[int] = pt_model.eval()
snake_case__ : Union[str, Any] = pt_model.state_dict()
for key in modified_state_dict.copy():
snake_case__ : Dict = modified_state_dict.pop(lowerCamelCase_ )
snake_case__ : int = rename_key(lowerCamelCase_ )
snake_case__ : Union[str, Any] = value
hf_model.load_state_dict(lowerCamelCase_ )
snake_case__ : Tuple = 384
snake_case__ : Union[str, Any] = load_demo_image(image_size=lowerCamelCase_ , device="cpu" )
snake_case__ : int = BertTokenizer.from_pretrained("bert-base-uncased" )
snake_case__ : Optional[int] = tokenizer(["a picture of"] ).input_ids
snake_case__ : Optional[Any] = hf_model.generate(lowerCamelCase_ , lowerCamelCase_ )
assert out[0].tolist() == [30522, 1037, 3861, 1997, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
snake_case__ : List[Any] = hf_model.generate(lowerCamelCase_ )
assert out[0].tolist() == [30522, 1037, 2450, 3564, 2006, 1996, 3509, 2007, 2014, 3899, 102]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(lowerCamelCase_ )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
snake_case__ : List[Any] = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
snake_case__ : Tuple = blip_vqa(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit="base" )
vqa_model.eval()
snake_case__ : Tuple = vqa_model.state_dict()
for key in modified_state_dict.copy():
snake_case__ : Dict = modified_state_dict.pop(lowerCamelCase_ )
snake_case__ : Tuple = rename_key(lowerCamelCase_ )
snake_case__ : List[Any] = value
snake_case__ : Any = BlipForQuestionAnswering(lowerCamelCase_ )
hf_vqa_model.load_state_dict(lowerCamelCase_ )
snake_case__ : Dict = ["""How many dogs are in this image?"""]
snake_case__ : Any = tokenizer(lowerCamelCase_ , return_tensors="pt" ).input_ids
snake_case__ : str = hf_vqa_model.generate(lowerCamelCase_ , lowerCamelCase_ )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + "_vqa" )
snake_case__ : str = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
snake_case__ : List[str] = blip_itm(pretrained=lowerCamelCase_ , image_size=lowerCamelCase_ , vit="base" )
itm_model.eval()
snake_case__ : Tuple = itm_model.state_dict()
for key in modified_state_dict.copy():
snake_case__ : Dict = modified_state_dict.pop(lowerCamelCase_ )
snake_case__ : Optional[int] = rename_key(lowerCamelCase_ )
snake_case__ : List[Any] = value
snake_case__ : str = BlipForImageTextRetrieval(lowerCamelCase_ )
snake_case__ : str = ["""A picture of a woman with a dog sitting in a beach"""]
snake_case__ : str = tokenizer(
lowerCamelCase_ , return_tensors="pt" , padding="max_length" , truncation=lowerCamelCase_ , max_length=35 , ).input_ids
hf_itm_model.load_state_dict(lowerCamelCase_ )
hf_itm_model.eval()
snake_case__ : List[Any] = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
snake_case__ : List[Any] = hf_itm_model(lowerCamelCase_ , lowerCamelCase_ , use_itm_head=lowerCamelCase_ )
assert out[0].item() == 0.21_10_68_74_94_27_79_54
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.4_56_98_84_53_86_50_51_27
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + "_itm" )
if __name__ == "__main__":
__lowerCamelCase : Optional[Any] = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
__lowerCamelCase : List[str] = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path)
| 712 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 25 | 0 |
import math
from typing import Any, Callable, List, Optional, Tuple, Union
import numpy as np
import torch
from ...models import TaFilmDecoder
from ...schedulers import DDPMScheduler
from ...utils import is_onnx_available, logging, randn_tensor
if is_onnx_available():
from ..onnx_utils import OnnxRuntimeModel
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
from .continous_encoder import SpectrogramContEncoder
from .notes_encoder import SpectrogramNotesEncoder
__lowerCamelCase : List[str] = logging.get_logger(__name__) # pylint: disable=invalid-name
__lowerCamelCase : Optional[int] = 256
class SCREAMING_SNAKE_CASE__ ( __lowerCamelCase ):
"""simple docstring"""
a_ = ['melgan']
def __init__( self : Tuple , __A : SpectrogramNotesEncoder , __A : SpectrogramContEncoder , __A : TaFilmDecoder , __A : DDPMScheduler , __A : OnnxRuntimeModel if is_onnx_available() else Any , ):
super().__init__()
# From MELGAN
snake_case__ : List[Any] = math.log(1e-5 ) # Matches MelGAN training.
snake_case__ : Any = 4.0 # Largest value for most examples
snake_case__ : Optional[int] = 1_2_8
self.register_modules(
notes_encoder=UpperCamelCase_ , continuous_encoder=UpperCamelCase_ , decoder=UpperCamelCase_ , scheduler=UpperCamelCase_ , melgan=UpperCamelCase_ , )
def _lowercase ( self : List[str] , __A : List[Any] , __A : Any=(-1.0, 1.0) , __A : Optional[int]=False ):
snake_case__, snake_case__ : Optional[int] = output_range
if clip:
snake_case__ : Dict = torch.clip(UpperCamelCase_ , self.min_value , self.max_value )
# Scale to [0, 1].
snake_case__ : Union[str, Any] = (features - self.min_value) / (self.max_value - self.min_value)
# Scale to [min_out, max_out].
return zero_one * (max_out - min_out) + min_out
def _lowercase ( self : Tuple , __A : Any , __A : Any=(-1.0, 1.0) , __A : Optional[Any]=False ):
snake_case__, snake_case__ : Tuple = input_range
snake_case__ : Optional[int] = torch.clip(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ ) if clip else outputs
# Scale to [0, 1].
snake_case__ : Dict = (outputs - min_out) / (max_out - min_out)
# Scale to [self.min_value, self.max_value].
return zero_one * (self.max_value - self.min_value) + self.min_value
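# Added sketch: the two methods above are inverse min-max maps between
# [min_value, max_value] (log(1e-5) to 4.0, set in __init__) and the requested
# output range. A feature at max_value gives zero_one = 1.0 and maps to
# max_out; one at min_value gives zero_one = 0.0 and maps to min_out.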
def _lowercase ( self : str , __A : Dict , __A : int , __A : Dict ):
snake_case__ : Optional[int] = input_tokens > 0
snake_case__, snake_case__ : Optional[Any] = self.notes_encoder(
encoder_input_tokens=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ )
snake_case__, snake_case__ : Tuple = self.continuous_encoder(
encoder_inputs=UpperCamelCase_ , encoder_inputs_mask=UpperCamelCase_ )
return [(tokens_encoded, tokens_mask), (continuous_encoded, continuous_mask)]
def _lowercase ( self : Optional[int] , __A : int , __A : str , __A : Union[str, Any] ):
snake_case__ : Optional[int] = noise_time
if not torch.is_tensor(UpperCamelCase_ ):
snake_case__ : Optional[int] = torch.tensor([timesteps] , dtype=torch.long , device=input_tokens.device )
elif torch.is_tensor(UpperCamelCase_ ) and len(timesteps.shape ) == 0:
snake_case__ : Tuple = timesteps[None].to(input_tokens.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case__ : List[str] = timesteps * torch.ones(input_tokens.shape[0] , dtype=timesteps.dtype , device=timesteps.device )
snake_case__ : Any = self.decoder(
encodings_and_masks=UpperCamelCase_ , decoder_input_tokens=UpperCamelCase_ , decoder_noise_time=UpperCamelCase_ )
return logits
@torch.no_grad()
def __call__( self : Optional[int] , __A : List[List[int]] , __A : Optional[torch.Generator] = None , __A : int = 1_0_0 , __A : bool = True , __A : str = "numpy" , __A : Optional[Callable[[int, int, torch.FloatTensor], None]] = None , __A : int = 1 , ):
if (callback_steps is None) or (
callback_steps is not None and (not isinstance(UpperCamelCase_ , UpperCamelCase_ ) or callback_steps <= 0)
):
raise ValueError(
f'''`callback_steps` has to be a positive integer but is {callback_steps} of type'''
f''' {type(UpperCamelCase_ )}.''' )
snake_case__ : Union[str, Any] = np.zeros([1, TARGET_FEATURE_LENGTH, self.n_dims] , dtype=np.floataa )
snake_case__ : Union[str, Any] = np.zeros([1, 0, self.n_dims] , np.floataa )
snake_case__ : Optional[int] = torch.ones((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device )
for i, encoder_input_tokens in enumerate(UpperCamelCase_ ):
if i == 0:
snake_case__ : Any = torch.from_numpy(pred_mel[:1].copy() ).to(
device=self.device , dtype=self.decoder.dtype )
# The first chunk has no previous context.
snake_case__ : Tuple = torch.zeros((1, TARGET_FEATURE_LENGTH) , dtype=UpperCamelCase_ , device=self.device )
else:
# The full song pipeline does not feed in a context feature, so the mask
# will be all 0s after the feature converter. Because we know we're
# feeding in a full context chunk from the previous prediction, set it
# to all 1s.
snake_case__ : Union[str, Any] = ones
snake_case__ : Dict = self.scale_features(
UpperCamelCase_ , output_range=[-1.0, 1.0] , clip=UpperCamelCase_ )
snake_case__ : Dict = self.encode(
input_tokens=torch.IntTensor([encoder_input_tokens] ).to(device=self.device ) , continuous_inputs=UpperCamelCase_ , continuous_mask=UpperCamelCase_ , )
# Sample encoder_continuous_inputs shaped gaussian noise to begin loop
snake_case__ : int = randn_tensor(
shape=encoder_continuous_inputs.shape , generator=UpperCamelCase_ , device=self.device , dtype=self.decoder.dtype , )
# set step values
self.scheduler.set_timesteps(UpperCamelCase_ )
# Denoising diffusion loop
for j, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
snake_case__ : str = self.decode(
encodings_and_masks=UpperCamelCase_ , input_tokens=UpperCamelCase_ , noise_time=t / self.scheduler.config.num_train_timesteps , )
# Compute previous output: x_t -> x_t-1
snake_case__ : str = self.scheduler.step(UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , generator=UpperCamelCase_ ).prev_sample
snake_case__ : List[str] = self.scale_to_features(UpperCamelCase_ , input_range=[-1.0, 1.0] )
snake_case__ : Optional[Any] = mel[:1]
snake_case__ : Optional[Any] = mel.cpu().float().numpy()
snake_case__ : Optional[Any] = np.concatenate([full_pred_mel, pred_mel[:1]] , axis=1 )
# call the callback, if provided
if callback is not None and i % callback_steps == 0:
callback(UpperCamelCase_ , UpperCamelCase_ )
logger.info("Generated segment" , UpperCamelCase_ )
if output_type == "numpy" and not is_onnx_available():
raise ValueError(
"Cannot return output in 'np' format if ONNX is not available. Make sure to have ONNX installed or set 'output_type' to 'mel'." )
elif output_type == "numpy" and self.melgan is None:
raise ValueError(
"Cannot return output in 'np' format if melgan component is not defined. Make sure to define `self.melgan` or set 'output_type' to 'mel'." )
if output_type == "numpy":
snake_case__ : Union[str, Any] = self.melgan(input_features=full_pred_mel.astype(np.floataa ) )
else:
snake_case__ : List[Any] = full_pred_mel
if not return_dict:
return (output,)
return AudioPipelineOutput(audios=UpperCamelCase_ )
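A minimal usage sketch for the pipeline above. The checkpoint id and top-level import reflect the diffusers release that shipped this pipeline and should be treated as assumptions; the note tokens are placeholders (real inputs come from a MIDI tokenizer):

import torch
from diffusers import SpectrogramDiffusionPipeline

# Assumed checkpoint id; substitute a real spectrogram-diffusion checkpoint.
pipe = SpectrogramDiffusionPipeline.from_pretrained("google/music-spectrogram-diffusion")
output = pipe(input_tokens=[[60, 62, 64, 0]], num_inference_steps=10, generator=torch.manual_seed(0))
audio = output.audios[0]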
| 713 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier


def data_handling(data: dict) -> tuple:
    # Split the sklearn dataset dict into (features, targets).
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    # Load the iris dataset and split it into train/test sets.
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)

    names = iris["target_names"]

    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)

    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier,
        x_test,
        y_test,
        display_labels=names,
        cmap="Blues",
        normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()


if __name__ == "__main__":
    import doctest

    doctest.testmod(verbose=True)
    main()
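As a quick numeric companion to the confusion matrix, a sketch that scores the same kind of classifier on held-out data, reusing the helper functions above (not part of the original script):

from sklearn.metrics import accuracy_score

iris = load_iris()
features, targets = data_handling(iris)
x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
model = xgboost(x_train, y_train)
print(f"Test accuracy: {accuracy_score(y_test, model.predict(x_test)):.3f}")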
| 25 | 0 |
import contextlib
import csv
import json
import os
import sqlite3
import tarfile
import textwrap
import zipfile

import pyarrow as pa
import pyarrow.parquet as pq
import pytest

import datasets
import datasets.config
@pytest.fixture(scope="session")
def dataset():
    n = 10
    features = datasets.Features(
        {
            "tokens": datasets.Sequence(datasets.Value("string")),
            "labels": datasets.Sequence(datasets.ClassLabel(names=["negative", "positive"])),
            "answers": datasets.Sequence(
                {
                    "text": datasets.Value("string"),
                    "answer_start": datasets.Value("int32"),
                }
            ),
            "id": datasets.Value("int64"),
        }
    )
    dataset = datasets.Dataset.from_dict(
        {
            "tokens": [["foo"] * 5] * n,
            "labels": [[1] * 5] * n,
            "answers": [{"answer_start": [97], "text": ["1976"]}] * 10,
            "id": list(range(n)),
        },
        features=features,
    )
    return dataset


@pytest.fixture(scope="session")
def arrow_file(tmp_path_factory, dataset):
    filename = str(tmp_path_factory.mktemp("data") / "file.arrow")
    dataset.map(cache_file_name=filename)
    return filename
# FILE_CONTENT + files


FILE_CONTENT = """\
Text data.
Second line of data."""


@pytest.fixture(scope="session")
def text_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.txt"
    data = FILE_CONTENT
    with open(filename, "w") as f:
        f.write(data)
    return filename
@pytest.fixture(scope="session")
def bz2_file(tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "file.txt.bz2"
    data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def gz_file(tmp_path_factory):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "file.txt.gz")
    data = bytes(FILE_CONTENT, "utf-8")
    with gzip.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def lz4_file(tmp_path_factory):
    if datasets.config.LZ4_AVAILABLE:
        import lz4.frame

        path = tmp_path_factory.mktemp("data") / "file.txt.lz4"
        data = bytes(FILE_CONTENT, "utf-8")
        with lz4.frame.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def seven_zip_file(tmp_path_factory, text_file):
    if datasets.config.PY7ZR_AVAILABLE:
        import py7zr

        path = tmp_path_factory.mktemp("data") / "file.txt.7z"
        with py7zr.SevenZipFile(path, "w") as archive:
            archive.write(text_file, arcname=os.path.basename(text_file))
        return path


@pytest.fixture(scope="session")
def tar_file(tmp_path_factory, text_file):
    path = tmp_path_factory.mktemp("data") / "file.txt.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def xz_file(tmp_path_factory):
    import lzma

    path = tmp_path_factory.mktemp("data") / "file.txt.xz"
    data = bytes(FILE_CONTENT, "utf-8")
    with lzma.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_file(tmp_path_factory, text_file):
    path = tmp_path_factory.mktemp("data") / "file.txt.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_file, arcname=os.path.basename(text_file))
    return path


@pytest.fixture(scope="session")
def zstd_file(tmp_path_factory):
    if datasets.config.ZSTANDARD_AVAILABLE:
        import zstandard as zstd

        path = tmp_path_factory.mktemp("data") / "file.txt.zst"
        data = bytes(FILE_CONTENT, "utf-8")
        with zstd.open(path, "wb") as f:
            f.write(data)
        return path


@pytest.fixture(scope="session")
def xml_file(tmp_path_factory):
    filename = tmp_path_factory.mktemp("data") / "file.xml"
    data = textwrap.dedent(
        """\
    <?xml version="1.0" encoding="UTF-8" ?>
    <tmx version="1.4">
      <header segtype="sentence" srclang="ca" />
      <body>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 1</seg></tuv>
          <tuv xml:lang="en"><seg>Content 1</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 2</seg></tuv>
          <tuv xml:lang="en"><seg>Content 2</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 3</seg></tuv>
          <tuv xml:lang="en"><seg>Content 3</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 4</seg></tuv>
          <tuv xml:lang="en"><seg>Content 4</seg></tuv>
        </tu>
        <tu>
          <tuv xml:lang="ca"><seg>Contingut 5</seg></tuv>
          <tuv xml:lang="en"><seg>Content 5</seg></tuv>
        </tu>
      </body>
    </tmx>"""
    )
    with open(filename, "w") as f:
        f.write(data)
    return filename
DATA = [
    {"col_1": "0", "col_2": 0, "col_3": 0.0},
    {"col_1": "1", "col_2": 1, "col_3": 1.0},
    {"col_1": "2", "col_2": 2, "col_3": 2.0},
    {"col_1": "3", "col_2": 3, "col_3": 3.0},
]
DATA2 = [
    {"col_1": "4", "col_2": 4, "col_3": 4.0},
    {"col_1": "5", "col_2": 5, "col_3": 5.0},
]
DATA_DICT_OF_LISTS = {
    "col_1": ["0", "1", "2", "3"],
    "col_2": [0, 1, 2, 3],
    "col_3": [0.0, 1.0, 2.0, 3.0],
}
DATA_312 = [
    {"col_3": 0.0, "col_1": "0", "col_2": 0},
    {"col_3": 1.0, "col_1": "1", "col_2": 1},
]
DATA_STR = [
    {"col_1": "s0", "col_2": 0, "col_3": 0.0},
    {"col_1": "s1", "col_2": 1, "col_3": 1.0},
    {"col_1": "s2", "col_2": 2, "col_3": 2.0},
    {"col_1": "s3", "col_2": 3, "col_3": 3.0},
]
@pytest.fixture(scope="session")
def dataset_dict():
    return DATA_DICT_OF_LISTS


@pytest.fixture(scope="session")
def arrow_path(tmp_path_factory):
    dataset = datasets.Dataset.from_dict(DATA_DICT_OF_LISTS)
    path = str(tmp_path_factory.mktemp("data") / "dataset.arrow")
    dataset.map(cache_file_name=path)
    return path


@pytest.fixture(scope="session")
def sqlite_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.sqlite")
    with contextlib.closing(sqlite3.connect(path)) as con:
        cur = con.cursor()
        cur.execute("CREATE TABLE dataset(col_1 text, col_2 int, col_3 real)")
        for item in DATA:
            cur.execute("INSERT INTO dataset(col_1, col_2, col_3) VALUES (?, ?, ?)", tuple(item.values()))
        con.commit()
    return path
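# Illustrative sketch (not part of the original fixtures): a test could read the rows
# back from the `sqlite_path` fixture above; pytest injects the fixture by parameter name.
def _example_read_sqlite(sqlite_path):
    with contextlib.closing(sqlite3.connect(sqlite_path)) as con:
        return con.execute("SELECT col_1, col_2, col_3 FROM dataset").fetchall()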
@pytest.fixture(scope="session")
def csv_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def csv2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.csv")
    with open(path, "w", newline="") as f:
        writer = csv.DictWriter(f, fieldnames=["col_1", "col_2", "col_3"])
        writer.writeheader()
        for item in DATA:
            writer.writerow(item)
    return path


@pytest.fixture(scope="session")
def bz2_csv_path(csv_path, tmp_path_factory):
    import bz2

    path = tmp_path_factory.mktemp("data") / "dataset.csv.bz2"
    with open(csv_path, "rb") as f:
        data = f.read()
    # data = bytes(FILE_CONTENT, "utf-8")
    with bz2.open(path, "wb") as f:
        f.write(data)
    return path


@pytest.fixture(scope="session")
def zip_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path))
        f.write(csv2_path, arcname=os.path.basename(csv2_path))
    return path


@pytest.fixture(scope="session")
def zip_uppercase_csv_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.basename(csv_path.replace(".csv", ".CSV")))
        f.write(csv2_path, arcname=os.path.basename(csv2_path.replace(".csv", ".CSV")))
    return path


@pytest.fixture(scope="session")
def zip_csv_with_dir_path(csv_path, csv2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.csv.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(csv_path, arcname=os.path.join("main_dir", os.path.basename(csv_path)))
        f.write(csv2_path, arcname=os.path.join("main_dir", os.path.basename(csv2_path)))
    return path


@pytest.fixture(scope="session")
def parquet_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.parquet")
    schema = pa.schema(
        {
            "col_1": pa.string(),
            "col_2": pa.int64(),
            "col_3": pa.float64(),
        }
    )
    with open(path, "wb") as f:
        writer = pq.ParquetWriter(f, schema=schema)
        pa_table = pa.Table.from_pydict({k: [DATA[i][k] for i in range(len(DATA))] for k in DATA[0]}, schema=schema)
        writer.write_table(pa_table)
        writer.close()
    return path
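# Illustrative sketch (not part of the original fixtures): reading the parquet fixture
# back to check its schema; `parquet_path` would be injected by pytest in a real test.
def _example_read_parquet(parquet_path):
    table = pq.read_table(parquet_path)
    assert table.column_names == ["col_1", "col_2", "col_3"]
    return table.to_pydict()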
@pytest.fixture(scope="session")
def json_list_of_dicts_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def json_dict_of_lists_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.json")
    data = {"data": DATA_DICT_OF_LISTS}
    with open(path, "w") as f:
        json.dump(data, f)
    return path


@pytest.fixture(scope="session")
def jsonl_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl2_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset2.jsonl")
    with open(path, "w") as f:
        for item in DATA:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_312_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset_312.jsonl")
    with open(path, "w") as f:
        for item in DATA_312:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def jsonl_str_path(tmp_path_factory):
    path = str(tmp_path_factory.mktemp("data") / "dataset-str.jsonl")
    with open(path, "w") as f:
        for item in DATA_STR:
            f.write(json.dumps(item) + "\n")
    return path


@pytest.fixture(scope="session")
def text_gz_path(tmp_path_factory, text_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.txt.gz")
    with open(text_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path


@pytest.fixture(scope="session")
def jsonl_gz_path(tmp_path_factory, jsonl_path):
    import gzip

    path = str(tmp_path_factory.mktemp("data") / "dataset.jsonl.gz")
    with open(jsonl_path, "rb") as orig_file:
        with gzip.open(path, "wb") as zipped_file:
            zipped_file.writelines(orig_file)
    return path
@pytest.fixture(scope="session")
def zip_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.write(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def zip_nested_jsonl_path(zip_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(zip_jsonl_path, arcname=os.path.join("nested", os.path.basename(zip_jsonl_path)))
    return path


@pytest.fixture(scope="session")
def zip_jsonl_with_dir_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.jsonl.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(jsonl_path, arcname=os.path.join("main_dir", os.path.basename(jsonl_path)))
        f.write(jsonl2_path, arcname=os.path.join("main_dir", os.path.basename(jsonl2_path)))
    return path


@pytest.fixture(scope="session")
def tar_jsonl_path(jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(jsonl_path, arcname=os.path.basename(jsonl_path))
        f.add(jsonl2_path, arcname=os.path.basename(jsonl2_path))
    return path


@pytest.fixture(scope="session")
def tar_nested_jsonl_path(tar_jsonl_path, jsonl_path, jsonl2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_nested.jsonl.tar"
    with tarfile.TarFile(path, "w") as f:
        f.add(tar_jsonl_path, arcname=os.path.join("nested", os.path.basename(tar_jsonl_path)))
    return path
@pytest.fixture(scope="session")
def text_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def text2_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = str(tmp_path_factory.mktemp("data") / "dataset2.txt")
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def abc_path(tmp_path_factory):
    data = ["0", "1", "2", "3"]
    path = tmp_path_factory.mktemp("data") / "dataset.abc"
    with open(path, "w") as f:
        for item in data:
            f.write(item + "\n")
    return path


@pytest.fixture(scope="session")
def zip_text_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename(text_path))
        f.write(text2_path, arcname=os.path.basename(text2_path))
    return path


@pytest.fixture(scope="session")
def zip_text_with_dir_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset_with_dir.text.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.join("main_dir", os.path.basename(text_path)))
        f.write(text2_path, arcname=os.path.join("main_dir", os.path.basename(text2_path)))
    return path


@pytest.fixture(scope="session")
def zip_unsupported_ext_path(text_path, text2_path, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.ext.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(text_path, arcname=os.path.basename("unsupported.ext"))
        f.write(text2_path, arcname=os.path.basename("unsupported_2.ext"))
    return path


@pytest.fixture(scope="session")
def text_path_with_unicode_new_lines(tmp_path_factory):
    text = "\n".join(["First", "Second\u2029with Unicode new line", "Third"])
    path = str(tmp_path_factory.mktemp("data") / "dataset_with_unicode_new_lines.txt")
    with open(path, "w", encoding="utf-8") as f:
        f.write(text)
    return path
@pytest.fixture(scope="session")
def image_file():
    return os.path.join("tests", "features", "data", "test_image_rgb.jpg")


@pytest.fixture(scope="session")
def audio_file():
    return os.path.join("tests", "features", "data", "test_audio_44100.wav")


@pytest.fixture(scope="session")
def zip_image_path(image_file, tmp_path_factory):
    path = tmp_path_factory.mktemp("data") / "dataset.img.zip"
    with zipfile.ZipFile(path, "w") as f:
        f.write(image_file, arcname=os.path.basename(image_file))
        f.write(image_file, arcname=os.path.basename(image_file).replace(".jpg", "2.jpg"))
    return path


@pytest.fixture(scope="session")
def data_dir_with_hidden_files(tmp_path_factory):
    data_dir = tmp_path_factory.mktemp("data_dir")

    (data_dir / "subdir").mkdir()
    with open(data_dir / "subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / "subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden file
    with open(data_dir / "subdir" / ".test.txt", "w") as f:
        f.write("bar\n" * 10)
    # hidden directory
    (data_dir / ".subdir").mkdir()
    with open(data_dir / ".subdir" / "train.txt", "w") as f:
        f.write("foo\n" * 10)
    with open(data_dir / ".subdir" / "test.txt", "w") as f:
        f.write("bar\n" * 10)

    return data_dir
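To show how these session fixtures are consumed, a minimal sketch of a test that requests one by parameter name (the test itself is illustrative, not from the source):

import csv


def test_csv_fixture_roundtrip(csv_path):
    # pytest resolves `csv_path` to the session-scoped fixture defined above.
    with open(csv_path, newline="") as f:
        rows = list(csv.DictReader(f))
    assert rows[0] == {"col_1": "0", "col_2": "0", "col_3": "0.0"}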
| 714 |
import argparse
import re
from typing import Dict

import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline


def log_results(result: Dataset, args: Dict[str, str]):
    """DO NOT CHANGE. This function computes and logs the result metrics."""
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])

    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")

    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])

    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)

    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)

    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"

        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    """DO ADAPT FOR YOUR USE CASE. This function normalizes the target text."""
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())

    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]

    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))

    return text
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)

    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))

    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate

    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))

    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )

        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)

    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()

    parser.add_argument(
        "--model_id", type=str, required=True, help="Model identifier. Should be loadable with 🤗 Transformers"
    )
    parser.add_argument(
        "--dataset",
        type=str,
        required=True,
        help="Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets",
    )
    parser.add_argument(
        "--config", type=str, required=True, help="Config of the dataset. *E.g.* `'en'` for Common Voice"
    )
    parser.add_argument("--split", type=str, required=True, help="Split of the dataset. *E.g.* `'test'`")
    parser.add_argument(
        "--chunk_length_s", type=float, default=None, help="Chunk length in seconds. Defaults to 5 seconds."
    )
    parser.add_argument(
        "--stride_length_s", type=float, default=None, help="Stride of the audio chunks. Defaults to 1 second."
    )
    parser.add_argument(
        "--log_outputs", action="store_true", help="If defined, write outputs to log file for analysis."
    )
    parser.add_argument(
        "--device",
        type=int,
        default=None,
        help="The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.",
    )
    args = parser.parse_args()

    main(args)
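To make the normalization step concrete, a small sketch of `normalize_text` on a made-up sentence; the expected output follows from the regex and whitespace rules above:

print(normalize_text("Hello, World!\nSecond   line…"))
# -> "hello world second line"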
| 25 | 0 |