Dataset columns:

| column | dtype | range |
|---|---|---|
| code | string | lengths 81–54k |
| code_codestyle | int64 | 0–721 |
| style_context | string | lengths 91–41.9k |
| style_context_codestyle | int64 | 0–699 |
| label | int64 | 0–1 |
# DISCLAIMER: This file is strongly influenced by https://github.com/ermongroup/ddim
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import flax
import jax
import jax.numpy as jnp

from ..configuration_utils import ConfigMixin, register_to_config
from .scheduling_utils_flax import (
    CommonSchedulerState,
    FlaxKarrasDiffusionSchedulers,
    FlaxSchedulerMixin,
    FlaxSchedulerOutput,
    add_noise_common,
    get_velocity_common,
)


@flax.struct.dataclass
class DDPMSchedulerState:
    common: CommonSchedulerState

    # setable values
    init_noise_sigma: jnp.ndarray
    timesteps: jnp.ndarray
    num_inference_steps: Optional[int] = None

    @classmethod
    def create(cls, common: CommonSchedulerState, init_noise_sigma: jnp.ndarray, timesteps: jnp.ndarray):
        return cls(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)


@dataclass
class FlaxDDPMSchedulerOutput(FlaxSchedulerOutput):
    state: DDPMSchedulerState


class FlaxDDPMScheduler(FlaxSchedulerMixin, ConfigMixin):
    """Denoising diffusion probabilistic models (DDPM) scheduler, Flax implementation."""

    _compatibles = [e.name for e in FlaxKarrasDiffusionSchedulers]

    dtype: jnp.dtype

    @property
    def has_state(self):
        return True

    @register_to_config
    def __init__(self, num_train_timesteps: int = 1000, beta_start: float = 0.0001, beta_end: float = 0.02, beta_schedule: str = "linear", trained_betas: Optional[jnp.ndarray] = None, variance_type: str = "fixed_small", clip_sample: bool = True, prediction_type: str = "epsilon", dtype: jnp.dtype = jnp.float32):
        self.dtype = dtype

    def create_state(self, common: Optional[CommonSchedulerState] = None) -> DDPMSchedulerState:
        if common is None:
            common = CommonSchedulerState.create(self)

        # standard deviation of the initial noise distribution
        init_noise_sigma = jnp.array(1.0, dtype=self.dtype)

        timesteps = jnp.arange(0, self.config.num_train_timesteps).round()[::-1]

        return DDPMSchedulerState.create(common=common, init_noise_sigma=init_noise_sigma, timesteps=timesteps)

    def scale_model_input(self, state: DDPMSchedulerState, sample: jnp.ndarray, timestep: Optional[int] = None) -> jnp.ndarray:
        # DDPM does not rescale the model input, so this is a no-op
        return sample

    def set_timesteps(self, state: DDPMSchedulerState, num_inference_steps: int, shape: Tuple = ()) -> DDPMSchedulerState:
        step_ratio = self.config.num_train_timesteps // num_inference_steps
        # creates integer timesteps by multiplying by ratio
        # rounding to avoid issues when num_inference_steps is a power of 3
        timesteps = (jnp.arange(0, num_inference_steps) * step_ratio).round()[::-1]

        return state.replace(num_inference_steps=num_inference_steps, timesteps=timesteps)

    def _get_variance(self, state: DDPMSchedulerState, t, predicted_variance=None, variance_type=None):
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))

        # For t > 0, compute predicted variance βt (see formula (6) and (7) from https://arxiv.org/pdf/2006.11239.pdf)
        # and sample from it to get previous sample
        # x_{t-1} ~ N(pred_prev_sample, variance) == add variance to pred_sample
        variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * state.common.betas[t]

        if variance_type is None:
            variance_type = self.config.variance_type

        # hacks - were probably added for training stability
        if variance_type == "fixed_small":
            variance = jnp.clip(variance, a_min=1e-20)
        # for rl-diffuser https://arxiv.org/abs/2205.09991
        elif variance_type == "fixed_small_log":
            variance = jnp.log(jnp.clip(variance, a_min=1e-20))
        elif variance_type == "fixed_large":
            variance = state.common.betas[t]
        elif variance_type == "fixed_large_log":
            # Glide max_log
            variance = jnp.log(state.common.betas[t])
        elif variance_type == "learned":
            return predicted_variance
        elif variance_type == "learned_range":
            min_log = variance
            max_log = state.common.betas[t]
            frac = (predicted_variance + 1) / 2
            variance = frac * max_log + (1 - frac) * min_log

        return variance

    def step(self, state: DDPMSchedulerState, model_output: jnp.ndarray, timestep: int, sample: jnp.ndarray, key: Optional[jax.random.KeyArray] = None, return_dict: bool = True) -> Union[FlaxDDPMSchedulerOutput, Tuple]:
        t = timestep

        if key is None:
            key = jax.random.PRNGKey(0)

        if model_output.shape[1] == sample.shape[1] * 2 and self.config.variance_type in ["learned", "learned_range"]:
            model_output, predicted_variance = jnp.split(model_output, sample.shape[1], axis=1)
        else:
            predicted_variance = None

        # 1. compute alphas, betas
        alpha_prod_t = state.common.alphas_cumprod[t]
        alpha_prod_t_prev = jnp.where(t > 0, state.common.alphas_cumprod[t - 1], jnp.array(1.0, dtype=self.dtype))
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 2. compute predicted original sample from predicted noise, also called
        # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
        if self.config.prediction_type == "epsilon":
            pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
        elif self.config.prediction_type == "sample":
            pred_original_sample = model_output
        elif self.config.prediction_type == "v_prediction":
            pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
        else:
            raise ValueError(
                f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample` "
                "or `v_prediction` for the FlaxDDPMScheduler."
            )

        # 3. Clip "predicted x_0"
        if self.config.clip_sample:
            pred_original_sample = jnp.clip(pred_original_sample, -1, 1)

        # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * state.common.betas[t]) / beta_prod_t
        current_sample_coeff = state.common.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

        # 5. Compute predicted previous sample µ_t
        # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
        pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

        # 6. Add noise
        def random_variance():
            split_key = jax.random.split(key, num=1)
            noise = jax.random.normal(split_key, shape=model_output.shape, dtype=self.dtype)
            return (self._get_variance(state, t, predicted_variance=predicted_variance) ** 0.5) * noise

        variance = jnp.where(t > 0, random_variance(), jnp.zeros(model_output.shape, dtype=self.dtype))

        pred_prev_sample = pred_prev_sample + variance

        if not return_dict:
            return (pred_prev_sample, state)

        return FlaxDDPMSchedulerOutput(prev_sample=pred_prev_sample, state=state)

    def add_noise(self, state: DDPMSchedulerState, original_samples: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return add_noise_common(state.common, original_samples, noise, timesteps)

    def get_velocity(self, state: DDPMSchedulerState, sample: jnp.ndarray, noise: jnp.ndarray, timesteps: jnp.ndarray) -> jnp.ndarray:
        return get_velocity_common(state.common, sample, noise, timesteps)

    def __len__(self):
        return self.config.num_train_timesteps
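
# --- Added usage sketch (not part of the original file) ---
# A minimal sketch of how this scheduler is typically driven in a denoising
# loop. `dummy_model` is a stand-in assumption; a real pipeline would call a
# trained Flax noise-prediction network instead.
import jax
import jax.numpy as jnp
from diffusers import FlaxDDPMScheduler

scheduler = FlaxDDPMScheduler(num_train_timesteps=1000)
state = scheduler.create_state()
state = scheduler.set_timesteps(state, num_inference_steps=50)

key = jax.random.PRNGKey(0)
sample = jax.random.normal(key, (1, 3, 32, 32)) * state.init_noise_sigma

def dummy_model(x, t):
    # placeholder for a trained noise-prediction network
    return jnp.zeros_like(x)

for t in state.timesteps:
    model_output = dummy_model(sample, t)
    key, step_key = jax.random.split(key)
    # with return_dict=False, step() returns (pred_prev_sample, state)
    sample, state = scheduler.step(state, model_output, t, sample, key=step_key, return_dict=False)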
[code_codestyle: 25]
import json
import pathlib
import unittest

import numpy as np

from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available

from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs


if is_torch_available():
    import torch

if is_vision_available():
    from PIL import Image

    from transformers import DeformableDetrImageProcessor


class DeformableDetrImageProcessingTester(unittest.TestCase):
    def __init__(self, parent, batch_size=7, num_channels=3, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True, image_mean=[0.5, 0.5, 0.5], image_std=[0.5, 0.5, 0.5], do_rescale=True, rescale_factor=1 / 255, do_pad=True):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        """Compute the expected (height, width) after the shortest-edge resize rule."""
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]

        return expected_height, expected_width


@require_torch
@require_vision
class DeformableDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = DeformableDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = DeformableDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "do_rescale"))
        self.assertTrue(hasattr(image_processing, "do_pad"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape, (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())

        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}

        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)

        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))

        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
[style_context_codestyle: 25 | label: 1]
import argparse

import torch

from transformers import (
    SpeechT5Config,
    SpeechT5FeatureExtractor,
    SpeechT5ForSpeechToSpeech,
    SpeechT5ForSpeechToText,
    SpeechT5ForTextToSpeech,
    SpeechT5Processor,
    SpeechT5Tokenizer,
    logging,
)
from transformers.tokenization_utils import AddedToken


logging.set_verbosity_info()
logger = logging.get_logger("transformers.models.speecht5")

MAPPING_SPEECH_ENCODER_PRENET = {
    "speech_encoder_prenet.layer_norm": "speecht5.encoder.prenet.feature_projection.layer_norm",
    "speech_encoder_prenet.post_extract_proj": "speecht5.encoder.prenet.feature_projection.projection",
    "speech_encoder_prenet.pos_conv.0": "speecht5.encoder.prenet.pos_conv_embed.conv",
    "speech_encoder_prenet.mask_emb": "speecht5.encoder.prenet.masked_spec_embed",
}
MAPPING_TEXT_ENCODER_PRENET = {
    "text_encoder_prenet.encoder_prenet.0": "speecht5.encoder.prenet.embed_tokens",
    "text_encoder_prenet.encoder_prenet.1.alpha": "speecht5.encoder.prenet.encode_positions.alpha",
}
MAPPING_SPEECH_DECODER_PRENET = {
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.0.0": "speecht5.decoder.prenet.layers.0",
    "speech_decoder_prenet.decoder_prenet.0.0.prenet.1.0": "speecht5.decoder.prenet.layers.1",
    "speech_decoder_prenet.decoder_prenet.0.1": "speecht5.decoder.prenet.final_layer",
    "speech_decoder_prenet.decoder_prenet.1.alpha": "speecht5.decoder.prenet.encode_positions.alpha",
    "speech_decoder_prenet.spkembs_layer.0": "speecht5.decoder.prenet.speaker_embeds_layer",
}
MAPPING_SPEECH_DECODER_POSTNET = {
    "speech_decoder_postnet.feat_out": "speech_decoder_postnet.feat_out",
    "speech_decoder_postnet.prob_out": "speech_decoder_postnet.prob_out",
    "speech_decoder_postnet.postnet.postnet.0.0": "speech_decoder_postnet.layers.0.conv",
    "speech_decoder_postnet.postnet.postnet.0.1": "speech_decoder_postnet.layers.0.batch_norm",
    "speech_decoder_postnet.postnet.postnet.1.0": "speech_decoder_postnet.layers.1.conv",
    "speech_decoder_postnet.postnet.postnet.1.1": "speech_decoder_postnet.layers.1.batch_norm",
    "speech_decoder_postnet.postnet.postnet.2.0": "speech_decoder_postnet.layers.2.conv",
    "speech_decoder_postnet.postnet.postnet.2.1": "speech_decoder_postnet.layers.2.batch_norm",
    "speech_decoder_postnet.postnet.postnet.3.0": "speech_decoder_postnet.layers.3.conv",
    "speech_decoder_postnet.postnet.postnet.3.1": "speech_decoder_postnet.layers.3.batch_norm",
    "speech_decoder_postnet.postnet.postnet.4.0": "speech_decoder_postnet.layers.4.conv",
    "speech_decoder_postnet.postnet.postnet.4.1": "speech_decoder_postnet.layers.4.batch_norm",
}
MAPPING_TEXT_DECODER_PRENET = {
    "text_decoder_prenet.embed_tokens": "speecht5.decoder.prenet.embed_tokens",
}
MAPPING_TEXT_DECODER_POSTNET = {
    "text_decoder_postnet.output_projection": "text_decoder_postnet.lm_head",
}
MAPPING_ENCODER = {
    "encoder.layers.*.self_attn.k_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.k_proj",
    "encoder.layers.*.self_attn.v_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.v_proj",
    "encoder.layers.*.self_attn.q_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.q_proj",
    "encoder.layers.*.self_attn.out_proj": "speecht5.encoder.wrapped_encoder.layers.*.attention.out_proj",
    "encoder.layers.*.self_attn_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.layer_norm",
    "encoder.layers.*.fc1": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.intermediate_dense",
    "encoder.layers.*.fc2": "speecht5.encoder.wrapped_encoder.layers.*.feed_forward.output_dense",
    "encoder.layers.*.final_layer_norm": "speecht5.encoder.wrapped_encoder.layers.*.final_layer_norm",
    "encoder.layer_norm": "speecht5.encoder.wrapped_encoder.layer_norm",
    "encoder.pos_emb.pe_k": "speecht5.encoder.wrapped_encoder.embed_positions.pe_k",
}
MAPPING_DECODER = {
    "decoder.layers.*.self_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.k_proj",
    "decoder.layers.*.self_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.v_proj",
    "decoder.layers.*.self_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.q_proj",
    "decoder.layers.*.self_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.self_attn.out_proj",
    "decoder.layers.*.self_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.self_attn_layer_norm",
    "decoder.layers.*.encoder_attn.k_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.k_proj",
    "decoder.layers.*.encoder_attn.v_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.v_proj",
    "decoder.layers.*.encoder_attn.q_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.q_proj",
    "decoder.layers.*.encoder_attn.out_proj": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn.out_proj",
    "decoder.layers.*.encoder_attn_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.encoder_attn_layer_norm",
    "decoder.layers.*.fc1": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.intermediate_dense",
    "decoder.layers.*.fc2": "speecht5.decoder.wrapped_decoder.layers.*.feed_forward.output_dense",
    "decoder.layers.*.final_layer_norm": "speecht5.decoder.wrapped_decoder.layers.*.final_layer_norm",
}
MAPPING_S2T = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_TEXT_DECODER_PRENET,
    **MAPPING_TEXT_DECODER_POSTNET,
}
MAPPING_T2S = {
    **MAPPING_TEXT_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
MAPPING_S2S = {
    **MAPPING_SPEECH_ENCODER_PRENET,
    **MAPPING_ENCODER,
    **MAPPING_DECODER,
    **MAPPING_SPEECH_DECODER_PRENET,
    **MAPPING_SPEECH_DECODER_POSTNET,
}
TOP_LEVEL_KEYS = []
IGNORE_KEYS = [
    "encoder.version",
    "encoder.layers.*.norm_k.weight",
    "encoder.layers.*.norm_k.bias",
    "decoder.version",
    "decoder.layers.*.norm_k.weight",
    "decoder.layers.*.norm_k.bias",
    "decoder.pos_emb.pe_k",
    "speech_encoder_prenet.embed_positions._float_tensor",
    "text_decoder_prenet.embed_positions._float_tensor",
]
IGNORE_KEYS_S2T = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "speech_decoder_prenet.*",
    "speech_decoder_postnet.*",
]
IGNORE_KEYS_T2S = IGNORE_KEYS + [
    "encoder.proj",
    "speech_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]
IGNORE_KEYS_S2S = IGNORE_KEYS + [
    "encoder.proj",
    "text_encoder_prenet.*",
    "text_decoder_prenet.*",
    "text_decoder_postnet.*",
]


def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    elif weight_type == "running_mean":
        hf_pointer.running_mean.data = value
    elif weight_type == "running_var":
        hf_pointer.running_var.data = value
    elif weight_type == "num_batches_tracked":
        hf_pointer.num_batches_tracked.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + ('.' + weight_type if weight_type is not None else '')} was initialized from {full_name}.")


def should_ignore(name, ignore_keys):
    for key in ignore_keys:
        if key.endswith(".*"):
            if name.startswith(key[:-1]):
                return True
        elif ".*." in key:
            prefix, suffix = key.split(".*.")
            if prefix in name and suffix in name:
                return True
        elif key in name:
            return True
    return False


def recursively_load_weights(fairseq_dict, hf_model, task):
    unused_weights = []

    if task == "s2t":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2T
        IGNORE_KEYS = IGNORE_KEYS_S2T
    elif task == "t2s":
        feature_encoder = None
        MAPPING = MAPPING_T2S
        IGNORE_KEYS = IGNORE_KEYS_T2S
    elif task == "s2s":
        feature_encoder = hf_model.speecht5.encoder.prenet.feature_encoder
        MAPPING = MAPPING_S2S
        IGNORE_KEYS = IGNORE_KEYS_S2S
    else:
        raise ValueError(f"Unsupported task: {task}")

    for name, value in fairseq_dict.items():
        if should_ignore(name, IGNORE_KEYS):
            logger.info(f"{name} was ignored")
            continue

        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name, value, feature_encoder, unused_weights, hf_model.config.feat_extract_norm == "group",
            )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                # mapped_key = "speecht5." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if "*" in key:
                    prefix, suffix = key.split(".*.")
                    if prefix in name and suffix in name:
                        key = suffix

                # if key in name or key.split("w2v_model.")[-1] == name.split(".")[0]:
                if key in name:
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key)[0].split(".")[-2]
                        mapped_key = mapped_key.replace("*", layer_index)
                    if "weight_g" in name:
                        weight_type = "weight_g"
                    elif "weight_v" in name:
                        weight_type = "weight_v"
                    elif "bias" in name:
                        weight_type = "bias"
                    elif "weight" in name:
                        weight_type = "weight"
                    elif "running_mean" in name:
                        weight_type = "running_mean"
                    elif "running_var" in name:
                        weight_type = "running_var"
                    elif "num_batches_tracked" in name:
                        weight_type = "num_batches_tracked"
                    else:
                        weight_type = None
                    set_recursively(hf_model, mapped_key, value, name, weight_type)
                continue
        if not is_used:
            unused_weights.append(name)

    logger.warning(f"Unused weights: {unused_weights}")


def load_conv_layer(full_name, value, feature_extractor, unused_weights, use_group_norm):
    name = full_name.split("conv_layers.")[-1]
    items = name.split(".")
    layer_id = int(items[0])
    type_id = int(items[1])

    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(f"Feat extract conv layer {layer_id} was initialized from {full_name}.")
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    f"{full_name} has size {value.shape}, but"
                    f" {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found."
                )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(f"Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.")
    else:
        unused_weights.append(full_name)


@torch.no_grad()
def convert_speecht5_checkpoint(task, checkpoint_path, pytorch_dump_folder_path, config_path=None, vocab_path=None, repo_id=None):
    if config_path is not None:
        config = SpeechT5Config.from_pretrained(config_path)
    else:
        config = SpeechT5Config()

    if task == "s2t":
        config.max_length = config.max_text_positions
        model = SpeechT5ForSpeechToText(config)
    elif task == "t2s":
        config.max_speech_positions = 1876
        config.max_text_positions = 600
        config.max_length = config.max_speech_positions
        model = SpeechT5ForTextToSpeech(config)
    elif task == "s2s":
        config.max_speech_positions = 1876
        config.max_length = config.max_speech_positions
        model = SpeechT5ForSpeechToSpeech(config)
    else:
        raise ValueError(f"Unknown task name: {task}")

    if vocab_path:
        tokenizer = SpeechT5Tokenizer(vocab_path, model_max_length=config.max_text_positions)

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken("<mask>", lstrip=True, rstrip=False)
        tokenizer.mask_token = mask_token
        tokenizer.add_special_tokens({"mask_token": mask_token})
        tokenizer.add_tokens(["<ctc_blank>"])

    feature_extractor = SpeechT5FeatureExtractor()
    processor = SpeechT5Processor(tokenizer=tokenizer, feature_extractor=feature_extractor)
    processor.save_pretrained(pytorch_dump_folder_path)

    fairseq_checkpoint = torch.load(checkpoint_path)
    recursively_load_weights(fairseq_checkpoint["model"], model, task)

    model.save_pretrained(pytorch_dump_folder_path)

    if repo_id:
        print("Pushing to the hub...")
        processor.push_to_hub(repo_id)
        model.push_to_hub(repo_id)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--task",
        default="s2t",
        type=str,
        help="Type of the SpeechT5 model you'd like to convert. Should be one of 's2t', 't2s', 's2s'.",
    )
    parser.add_argument("--checkpoint_path", required=True, default=None, type=str, help="Path to fairseq checkpoint")
    parser.add_argument("--vocab_path", default=None, type=str, help="Path to SentencePiece model")
    parser.add_argument("--config_path", default=None, type=str, help="Path to hf config.json of model to convert")
    parser.add_argument(
        "--pytorch_dump_folder_path", required=True, default=None, type=str, help="Path to the output PyTorch model."
    )
    parser.add_argument(
        "--push_to_hub", default=None, type=str, help="Where to upload the converted model on the 🤗 hub."
    )
    args = parser.parse_args()
    convert_speecht5_checkpoint(
        args.task,
        args.checkpoint_path,
        args.pytorch_dump_folder_path,
        args.config_path,
        args.vocab_path,
        args.push_to_hub,
    )
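
# --- Added usage sketch (not part of the original script) ---
# Hypothetical invocation of the converter above; the checkpoint and vocab
# paths are placeholders, and the script filename is assumed.
#
#   python convert_speecht5_original_pytorch_checkpoint_to_pytorch.py \
#       --task t2s \
#       --checkpoint_path ./speecht5_tts.pt \
#       --vocab_path ./spm_char.model \
#       --pytorch_dump_folder_path ./speecht5_tts_hf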
[code_codestyle: 25]
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union

import regex as re

from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}


@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(self, vocab_file, merges_file, errors="replace", bos_token="<s>", eos_token="</s>", sep_token="</s>", cls_token="<s>", unk_token="<unk>", pad_token="<pad>", mask_token="<mask>", add_prefix_space=False, **kwargs):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token

        # Mask token behaves like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token

        super().__init__(
            errors=errors, bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, sep_token=sep_token, cls_token=cls_token, pad_token=pad_token, mask_token=mask_token, add_prefix_space=add_prefix_space, **kwargs,
        )

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space

        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(self, encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding], max_length: Optional[int] = None, padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD, pad_to_multiple_of: Optional[int] = None, return_attention_mask: Optional[bool] = None):
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs, max_length=max_length, padding_strategy=padding_strategy, pad_to_multiple_of=pad_to_multiple_of, return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
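
# --- Added example (not part of the original file) ---
# Illustration of what the `_pad` override above does to
# `global_attention_mask` (values are made up for the sketch):
inputs = {"input_ids": [0, 713, 2], "global_attention_mask": [1, 0, 0]}
# after padding input_ids to length 5 on the right, the override appends -1s:
#   global_attention_mask -> [1, 0, 0, -1, -1]
# because 0 already means "local attention", so pad positions must use -1.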
[style_context_codestyle: 25 | label: 1]
from typing import TYPE_CHECKING

from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available


_import_structure = {
    "configuration_clipseg": [
        "CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP",
        "CLIPSegConfig",
        "CLIPSegTextConfig",
        "CLIPSegVisionConfig",
    ],
    "processing_clipseg": ["CLIPSegProcessor"],
}

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_clipseg"] = [
        "CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST",
        "CLIPSegModel",
        "CLIPSegPreTrainedModel",
        "CLIPSegTextModel",
        "CLIPSegVisionModel",
        "CLIPSegForImageSegmentation",
    ]

if TYPE_CHECKING:
    from .configuration_clipseg import (
        CLIPSEG_PRETRAINED_CONFIG_ARCHIVE_MAP,
        CLIPSegConfig,
        CLIPSegTextConfig,
        CLIPSegVisionConfig,
    )
    from .processing_clipseg import CLIPSegProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_clipseg import (
            CLIPSEG_PRETRAINED_MODEL_ARCHIVE_LIST,
            CLIPSegForImageSegmentation,
            CLIPSegModel,
            CLIPSegPreTrainedModel,
            CLIPSegTextModel,
            CLIPSegVisionModel,
        )
else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
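
# --- Added note (not part of the original file) ---
# With _LazyModule installed in sys.modules, importing a name from this
# package only loads modeling_clipseg (and hence torch) at attribute access
# time. A minimal sketch of the deferral idea, independent of transformers:
import importlib

class LazyAttr:
    def __init__(self, module_name):
        self._module_name = module_name

    def __getattr__(self, name):
        # the real import happens only on first attribute lookup
        return getattr(importlib.import_module(self._module_name), name)

np_lazy = LazyAttr("numpy")   # nothing imported yet
np_lazy.array([1, 2, 3])      # numpy is imported here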
[code_codestyle: 25]
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run

import sys
import warnings
from os.path import abspath, dirname, join


# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
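
# --- Added usage sketch (not part of the original file) ---
# With the hooks above registered, report generation is opt-in, e.g.:
#
#   python -m pytest tests/ --make-reports=my_test_run
#
# (flag name taken from the getoption call above; the report output location
# depends on pytest_terminal_summary_main's implementation in diffusers.)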
[style_context_codestyle: 25 | label: 1]
import json
import os
from typing import Optional, Tuple

import regex as re

from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging


logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json"},
    "merges_file": {"ctrl": "https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt"},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "ctrl": 256,
}

CONTROL_CODES = {
    "Pregnancy": 168629,
    "Christianity": 7675,
    "Explain": 106423,
    "Fitness": 63440,
    "Saving": 63163,
    "Ask": 27171,
    "Ass": 95985,
    "Joke": 163509,
    "Questions": 45622,
    "Thoughts": 49605,
    "Retail": 52342,
    "Feminism": 164338,
    "Writing": 11992,
    "Atheism": 192263,
    "Netflix": 48616,
    "Computing": 39639,
    "Opinion": 43213,
    "Alone": 44967,
    "Funny": 58917,
    "Gaming": 40358,
    "Human": 4088,
    "India": 1331,
    "Joker": 77138,
    "Diet": 36206,
    "Legal": 11859,
    "Norman": 4939,
    "Tip": 72689,
    "Weight": 52343,
    "Movies": 46273,
    "Running": 23425,
    "Science": 2090,
    "Horror": 37793,
    "Confession": 60572,
    "Finance": 12250,
    "Politics": 16360,
    "Scary": 191985,
    "Support": 12654,
    "Technologies": 32516,
    "Teenage": 66160,
    "Event": 32769,
    "Learned": 67460,
    "Notion": 182770,
    "Wikipedia": 37583,
    "Books": 6665,
    "Extract": 76050,
    "Confessions": 102701,
    "Conspiracy": 75932,
    "Links": 63674,
    "Narcissus": 150425,
    "Relationship": 54766,
    "Relationships": 134796,
    "Reviews": 41671,
    "News": 4256,
    "Translation": 26820,
    "multilingual": 128406,
}


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}

    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file

    # def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
    #     filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
    #     tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
    #     tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
    #     return ''.join(tokens_generated_so_far)
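
# --- Added example (not part of the original file) ---
# Illustration of the "@@" continuation convention produced by `bpe` above
# (the token pieces are hypothetical; actual splits depend on the merges file):
#
#   bpe("tokenization")  ->  "token@@ ization"        # "@@ " marks a non-final piece
#   convert_tokens_to_string(["token@@", "ization"])  ->  "tokenization"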
[code_codestyle: 25]
def prefix_function(input_string: str) -> list:
    """
    Computes the prefix function (the KMP failure function): for each position
    i, the length of the longest proper prefix of input_string[: i + 1] that
    is also a suffix of it.

    >>> prefix_function("aabcdaabc")
    [0, 1, 0, 0, 0, 1, 2, 3, 4]
    >>> prefix_function("asdasdad")
    [0, 0, 0, 1, 2, 3, 4, 0]
    """
    prefix_result = [0] * len(input_string)

    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]

        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j

    return prefix_result


def longest_prefix(input_str: str) -> int:
    """
    Returns the length of the longest prefix-function value.

    >>> longest_prefix("aabcdaabc")
    4
    >>> longest_prefix("asdasdad")
    4
    """
    return max(prefix_function(input_str))


if __name__ == "__main__":
    import doctest

    doctest.testmod()
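
# --- Added example (not part of the original file) ---
# The classic application of the prefix function is Knuth-Morris-Pratt
# substring search: concatenate pattern + separator + text and look for a
# prefix-function value equal to len(pattern). A minimal sketch:
def kmp_find(pattern: str, text: str) -> int:
    """Return the index of the first occurrence of `pattern` in `text`, or -1."""
    pi = prefix_function(pattern + "\x00" + text)  # "\x00" must occur in neither string
    for i, value in enumerate(pi):
        if value == len(pattern):
            return i - 2 * len(pattern)  # start index of the match within `text`
    return -1

assert kmp_find("abc", "zzabcz") == 2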
[style_context_codestyle: 25 | label: 1]
from collections import OrderedDict
from typing import Mapping

from packaging import version

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices


logger = logging.get_logger(__name__)

SWIN_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "microsoft/swin-tiny-patch4-window7-224": (
        "https://huggingface.co/microsoft/swin-tiny-patch4-window7-224/resolve/main/config.json"
    ),
    # See all Swin models at https://huggingface.co/models?filter=swin
}


class SwinConfig(BackboneConfigMixin, PretrainedConfig):
    model_type = "swin"

    attribute_map = {
        "num_attention_heads": "num_heads",
        "num_hidden_layers": "num_layers",
    }

    def __init__(self, image_size=224, patch_size=4, num_channels=3, embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7, mlp_ratio=4.0, qkv_bias=True, hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, drop_path_rate=0.1, hidden_act="gelu", use_absolute_embeddings=False, initializer_range=0.02, layer_norm_eps=1e-5, encoder_stride=32, out_features=None, out_indices=None, **kwargs):
        super().__init__(**kwargs)

        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.embed_dim = embed_dim
        self.depths = depths
        self.num_layers = len(depths)
        self.num_heads = num_heads
        self.window_size = window_size
        self.mlp_ratio = mlp_ratio
        self.qkv_bias = qkv_bias
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.drop_path_rate = drop_path_rate
        self.hidden_act = hidden_act
        self.use_absolute_embeddings = use_absolute_embeddings
        self.layer_norm_eps = layer_norm_eps
        self.initializer_range = initializer_range
        self.encoder_stride = encoder_stride
        # we set the hidden_size attribute in order to make Swin work with VisionEncoderDecoderModel
        # this indicates the channel dimension after the last stage of the model
        self.hidden_size = int(embed_dim * 2 ** (len(depths) - 1))
        self.stage_names = ["stem"] + [f"stage{idx}" for idx in range(1, len(depths) + 1)]
        self._out_features, self._out_indices = get_aligned_output_features_output_indices(
            out_features=out_features, out_indices=out_indices, stage_names=self.stage_names
        )


class SwinOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4
| 25 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
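# A minimal usage sketch (the dataset contents are illustrative, not from this
# file): this formatter is what backs `with_format("jax")`, turning Arrow-backed
# rows into `jax.Array`s placed on the requested device.
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#   ds[0]["x"]  # -> jax.Array([1, 2], dtype=int32) under the default jax config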
| 25 | 1 |
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from timm.data import resolve_data_config
from timm.data.transforms_factory import create_transform
from transformers import (
BitConfig,
ViTHybridConfig,
ViTHybridForImageClassification,
ViTHybridImageProcessor,
ViTHybridModel,
)
from transformers.image_utils import PILImageResampling
from transformers.utils import logging
logging.set_verbosity_info()
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
def SCREAMING_SNAKE_CASE ( snake_case_ : Union[str, Any] , snake_case_ : List[Any]=False ):
snake_case__ : Dict = []
# fmt: off
# stem:
rename_keys.append(("cls_token", "vit.embeddings.cls_token") )
rename_keys.append(("pos_embed", "vit.embeddings.position_embeddings") )
rename_keys.append(("patch_embed.proj.weight", "vit.embeddings.patch_embeddings.projection.weight") )
rename_keys.append(("patch_embed.proj.bias", "vit.embeddings.patch_embeddings.projection.bias") )
# backbone
rename_keys.append(("patch_embed.backbone.stem.conv.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.convolution.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.weight", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.weight") )
rename_keys.append(("patch_embed.backbone.stem.norm.bias", "vit.embeddings.patch_embeddings.backbone.bit.embedder.norm.bias") )
for stage_idx in range(len(config.backbone_config.depths ) ):
for layer_idx in range(config.backbone_config.depths[stage_idx] ):
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm1.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm1.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm2.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm2.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.conv3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.conv3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.{layer_idx}.norm3.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.{layer_idx}.norm3.bias''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.conv.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.conv.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.weight''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.weight''') )
rename_keys.append((F'''patch_embed.backbone.stages.{stage_idx}.blocks.0.downsample.norm.bias''', F'''vit.embeddings.patch_embeddings.backbone.bit.encoder.stages.{stage_idx}.layers.0.downsample.norm.bias''') )
# transformer encoder
for i in range(config.num_hidden_layers ):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'''blocks.{i}.norm1.weight''', F'''vit.encoder.layer.{i}.layernorm_before.weight''') )
rename_keys.append((F'''blocks.{i}.norm1.bias''', F'''vit.encoder.layer.{i}.layernorm_before.bias''') )
rename_keys.append((F'''blocks.{i}.attn.proj.weight''', F'''vit.encoder.layer.{i}.attention.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.attn.proj.bias''', F'''vit.encoder.layer.{i}.attention.output.dense.bias''') )
rename_keys.append((F'''blocks.{i}.norm2.weight''', F'''vit.encoder.layer.{i}.layernorm_after.weight''') )
rename_keys.append((F'''blocks.{i}.norm2.bias''', F'''vit.encoder.layer.{i}.layernorm_after.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.weight''', F'''vit.encoder.layer.{i}.intermediate.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc1.bias''', F'''vit.encoder.layer.{i}.intermediate.dense.bias''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.weight''', F'''vit.encoder.layer.{i}.output.dense.weight''') )
rename_keys.append((F'''blocks.{i}.mlp.fc2.bias''', F'''vit.encoder.layer.{i}.output.dense.bias''') )
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("norm.weight", "layernorm.weight"),
("norm.bias", "layernorm.bias"),
("pre_logits.fc.weight", "pooler.dense.weight"),
("pre_logits.fc.bias", "pooler.dense.bias"),
] )
# if just the base model, we should remove "vit" from all keys that start with "vit"
snake_case__ : List[str] = [(pair[0], pair[1][4:]) if pair[1].startswith("vit" ) else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("norm.weight", "vit.layernorm.weight"),
("norm.bias", "vit.layernorm.bias"),
("head.weight", "classifier.weight"),
("head.bias", "classifier.bias"),
] )
# fmt: on
return rename_keys
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : Optional[Any]=False ):
for i in range(config.num_hidden_layers ):
if base_model:
snake_case__ : Tuple = ""
else:
snake_case__ : Optional[int] = "vit."
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
snake_case__ : List[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.weight''' )
snake_case__ : List[Any] = state_dict.pop(F'''blocks.{i}.attn.qkv.bias''' )
# next, add query, keys and values (in that order) to the state dict
snake_case__ : Any = in_proj_weight[
: config.hidden_size, :
]
snake_case__ : int = in_proj_bias[: config.hidden_size]
snake_case__ : Optional[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
snake_case__ : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
snake_case__ : int = in_proj_weight[
-config.hidden_size :, :
]
snake_case__ : List[Any] = in_proj_bias[-config.hidden_size :]
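# Shape sketch of the slicing above (sizes illustrative, not the real config):
# timm stores one fused projection of shape (3 * hidden_size, hidden_size) with
# the query block first, then key, then value, stacked along dim 0.
#
#   import torch
#   hidden = 4
#   qkv = torch.arange(3 * hidden * hidden, dtype=torch.float32).reshape(3 * hidden, hidden)
#   q, k, v = qkv[:hidden], qkv[hidden : 2 * hidden], qkv[-hidden:]
#   assert torch.equal(torch.cat([q, k, v]), qkv)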
def remove_classification_head_(state_dict):
    ignore_keys = ["head.weight", "head.bias"]
    for k in ignore_keys:
        state_dict.pop(k, None)


def rename_key(dct, old, new):
    val = dct.pop(old)
    dct[new] = val


def prepare_img():
    # standard COCO test image used across the conversion scripts
    url = "http://images.cocodataset.org/val2017/000000039769.jpg"
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : List[str] , snake_case_ : str=False ):
snake_case__ : Union[str, Any] = BitConfig(
global_padding="same" , layer_type="bottleneck" , depths=(3, 4, 9) , out_features=["stage3"] , embedding_dynamic_padding=snake_case_ , )
snake_case__ : Optional[int] = ViTHybridConfig(backbone_config=snake_case_ , image_size=384 , num_labels=1000 )
snake_case__ : Optional[int] = False
# load original model from timm
snake_case__ : Dict = timm.create_model(snake_case_ , pretrained=snake_case_ )
timm_model.eval()
# load state_dict of original model, remove and rename some keys
snake_case__ : str = timm_model.state_dict()
if base_model:
remove_classification_head_(snake_case_ )
snake_case__ : Any = create_rename_keys(snake_case_ , snake_case_ )
for src, dest in rename_keys:
rename_key(snake_case_ , snake_case_ , snake_case_ )
read_in_q_k_v(snake_case_ , snake_case_ , snake_case_ )
snake_case__ : Dict = "huggingface/label-files"
snake_case__ : List[str] = "imagenet-1k-id2label.json"
snake_case__ : Union[str, Any] = json.load(open(hf_hub_download(snake_case_ , snake_case_ , repo_type="dataset" ) , "r" ) )
snake_case__ : Any = {int(snake_case_ ): v for k, v in idalabel.items()}
snake_case__ : Optional[Any] = idalabel
snake_case__ : List[Any] = {v: k for k, v in idalabel.items()}
# load HuggingFace model
if vit_name[-5:] == "in21k":
snake_case__ : str = ViTHybridModel(snake_case_ ).eval()
else:
snake_case__ : Any = ViTHybridForImageClassification(snake_case_ ).eval()
model.load_state_dict(snake_case_ )
# create image processor
snake_case__ : int = create_transform(**resolve_data_config({} , model=snake_case_ ) )
snake_case__ : List[str] = transform.transforms
snake_case__ : Optional[int] = {
"bilinear": PILImageResampling.BILINEAR,
"bicubic": PILImageResampling.BICUBIC,
"nearest": PILImageResampling.NEAREST,
}
snake_case__ : str = ViTHybridImageProcessor(
do_resize=snake_case_ , size={"shortest_edge": timm_transforms[0].size} , resample=pillow_resamplings[timm_transforms[0].interpolation.value] , do_center_crop=snake_case_ , crop_size={"height": timm_transforms[1].size[0], "width": timm_transforms[1].size[1]} , do_normalize=snake_case_ , image_mean=timm_transforms[-1].mean.tolist() , image_std=timm_transforms[-1].std.tolist() , )
snake_case__ : Dict = prepare_img()
snake_case__ : Optional[Any] = transform(snake_case_ ).unsqueeze(0 )
snake_case__ : Dict = processor(snake_case_ , return_tensors="pt" ).pixel_values
# verify pixel values
assert torch.allclose(snake_case_ , snake_case_ )
# verify logits
with torch.no_grad():
snake_case__ : int = model(snake_case_ )
snake_case__ : Union[str, Any] = outputs.logits
print("Predicted class:" , logits.argmax(-1 ).item() )
if base_model:
snake_case__ : Optional[Any] = timm_model.forward_features(snake_case_ )
assert timm_pooled_output.shape == outputs.pooler_output.shape
assert torch.allclose(snake_case_ , outputs.pooler_output , atol=1E-3 )
else:
snake_case__ : Optional[Any] = timm_model(snake_case_ )
assert timm_logits.shape == outputs.logits.shape
assert torch.allclose(snake_case_ , outputs.logits , atol=1E-3 )
print("Looks ok!" )
if pytorch_dump_folder_path is not None:
Path(snake_case_ ).mkdir(exist_ok=snake_case_ )
print(F'''Saving model {vit_name} to {pytorch_dump_folder_path}''' )
model.save_pretrained(snake_case_ )
print(F'''Saving processor to {pytorch_dump_folder_path}''' )
processor.save_pretrained(snake_case_ )
if push_to_hub:
print(F'''Pushing model and processor to the hub {vit_name}''' )
model.push_to_hub(F'''ybelkada/{vit_name}''' )
processor.push_to_hub(F'''ybelkada/{vit_name}''' )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--vit_name""",
default="""vit_base_r50_s16_384""",
type=str,
help="""Name of the hybrid ViT timm model you'd like to convert.""",
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model directory."""
)
parser.add_argument(
"""--push_to_hub""", action="""store_true""", help="""Whether to upload the model to the HuggingFace hub."""
)
__lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_vit_checkpoint(args.vit_name, args.pytorch_dump_folder_path, args.push_to_hub)
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
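# Import-time note: with _LazyModule, a statement like
# `from transformers.models.roberta_prelayernorm import RobertaPreLayerNormModel`
# defers loading the heavy torch/tf/flax submodules until first attribute access;
# the TYPE_CHECKING branch above exists purely so static analyzers see real imports.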
| 25 | 1 |
import math
def solution(n: int = 100) -> int:
    """
    Difference between the square of the sum and the sum of the squares of the
    first n natural numbers.

    >>> solution(10)
    2640
    """
    sum_of_squares = sum(i * i for i in range(1, n + 1))
    square_of_sum = int(math.pow(sum(range(1, n + 1)), 2))
    return square_of_sum - sum_of_squares
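# Cross-check sketch (not in the original script): the closed forms
# sum(i) = n(n+1)/2 and sum(i^2) = n(n+1)(2n+1)/6 give the same answer
# without any loops.
def solution_closed_form(n: int = 100) -> int:
    square_of_sum = (n * (n + 1) // 2) ** 2
    sum_of_squares = n * (n + 1) * (2 * n + 1) // 6
    return square_of_sum - sum_of_squares


if __name__ == "__main__":
    assert solution_closed_form() == solution()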
if __name__ == "__main__":
print(f"{solution() = }")
| 25 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # setting an attribute on one instance must not leak to a fresh instance
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
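# Note on the gelu_10 test above: gelu_10 is plain GELU with outputs clipped at
# 10.0, so the two activations agree elementwise below the clip point (e.g.
# gelu(1.0) ~= 0.841 under both) while gelu_10(100.0) saturates at exactly 10.0.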
| 25 | 1 |
print((lambda quine: quine % quine)("""print((lambda quine: quine %% quine)(%r))"""))
| 25 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
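# Key-mapping sketch (the fairseq key below is illustrative, not from a real
# checkpoint): a source key such as "encoder.layers.3.fc1.weight" matches the
# "fc1" entry of MAPPING, the "*" in its target is replaced by the layer index,
# and the final HF key becomes
# "unispeech_sat.encoder.layers.3.feed_forward.intermediate_dense.weight".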
def set_recursively(hf_pointer, key, value, full_name, weight_type):
    for attribute in key.split("."):
        hf_pointer = getattr(hf_pointer, attribute)

    if weight_type is not None:
        hf_shape = getattr(hf_pointer, weight_type).shape
    else:
        hf_shape = hf_pointer.shape

    if hf_shape != value.shape:
        raise ValueError(
            f"Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be"
            f" {value.shape} for {full_name}"
        )

    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value

    logger.info(f"{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.")
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 25 | 1 |
__lowerCamelCase : Any = frozenset(
[
"""prompt""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
__lowerCamelCase : Any = frozenset(["""prompt""", """negative_prompt"""])
__lowerCamelCase : Optional[int] = frozenset([])
__lowerCamelCase : List[Any] = frozenset(["""image"""])
__lowerCamelCase : Union[str, Any] = frozenset(
[
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__lowerCamelCase : Union[str, Any] = frozenset(["""image"""])
__lowerCamelCase : Optional[Any] = frozenset(
[
"""prompt""",
"""image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
__lowerCamelCase : Union[str, Any] = frozenset(["""prompt""", """image""", """negative_prompt"""])
__lowerCamelCase : Tuple = frozenset(
[
# Text guided image variation with an image mask
"""prompt""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
]
)
__lowerCamelCase : Optional[Any] = frozenset(["""prompt""", """image""", """mask_image""", """negative_prompt"""])
__lowerCamelCase : List[Any] = frozenset(
[
# image variation with an image mask
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__lowerCamelCase : Any = frozenset(["""image""", """mask_image"""])
__lowerCamelCase : Union[str, Any] = frozenset(
[
"""example_image""",
"""image""",
"""mask_image""",
"""height""",
"""width""",
"""guidance_scale""",
]
)
__lowerCamelCase : Union[str, Any] = frozenset(["""example_image""", """image""", """mask_image"""])
__lowerCamelCase : Optional[Any] = frozenset(["""class_labels"""])
__lowerCamelCase : List[str] = frozenset(["""class_labels"""])
__lowerCamelCase : Union[str, Any] = frozenset(["""batch_size"""])
__lowerCamelCase : Union[str, Any] = frozenset([])
__lowerCamelCase : Optional[int] = frozenset(["""batch_size"""])
__lowerCamelCase : str = frozenset([])
__lowerCamelCase : List[Any] = frozenset(
[
"""prompt""",
"""audio_length_in_s""",
"""guidance_scale""",
"""negative_prompt""",
"""prompt_embeds""",
"""negative_prompt_embeds""",
"""cross_attention_kwargs""",
]
)
__lowerCamelCase : Any = frozenset(["""prompt""", """negative_prompt"""])
__lowerCamelCase : Tuple = frozenset(["""input_tokens"""])
__lowerCamelCase : Tuple = frozenset(["""input_tokens"""])
| 25 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
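        # Sketch of the clamp trick (values illustrative): with pad_token_id = 1,
        # clamp(self.pad_token_id + 1) lifts every sampled id below 2 up to 2, so
        # no pad ids remain inside the sequence and the position-id layout described
        # above stays valid, e.g. torch.tensor([5, 1, 7]).clamp(2) -> tensor([5, 2, 7]).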
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
snake_case__, snake_case__ : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 25 | 1 |
import json
import os
from functools import lru_cache
from typing import TYPE_CHECKING, List, Optional, Tuple
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : str = {
"""vocab_file""": """vocab.json""",
"""merges_file""": """merges.txt""",
"""tokenizer_config_file""": """tokenizer_config.json""",
}
__lowerCamelCase : List[Any] = {
"""vocab_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/vocab.json"""},
"""merges_file""": {"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/merges.txt"""},
"""tokenizer_config_file""": {
"""facebook/blenderbot-3B""": """https://huggingface.co/facebook/blenderbot-3B/resolve/main/tokenizer_config.json"""
},
}
__lowerCamelCase : int = {"""facebook/blenderbot-3B""": 128}
@lru_cache()
# Copied from transformers.models.roberta.tokenization_roberta.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))
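# Mapping note: every raw byte gets a printable stand-in. Bytes that are already
# printable map to themselves, while e.g. the space byte (32) is a skipped byte
# and becomes chr(256 + 32) == "Ġ", which is why GPT-2-style vocabularies are
# full of "Ġ"-prefixed tokens.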
def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : Dict , __A : int , __A : Dict , __A : int="replace" , __A : Tuple="<s>" , __A : Dict="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Any="<unk>" , __A : List[Any]="<pad>" , __A : List[Any]="<mask>" , __A : Optional[int]=False , **__A : Union[str, Any] , ):
snake_case__ : Optional[int] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Optional[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : Optional[int] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Dict = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : str = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Optional[int] = json.load(__A )
snake_case__ : Tuple = {v: k for k, v in self.encoder.items()}
snake_case__ : List[Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Dict = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : int = merges_handle.read().split("\n" )[1:-1]
snake_case__ : str = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[Any] = {}
snake_case__ : Dict = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : List[Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.roberta.tokenization_roberta.RobertaTokenizer.vocab_size with Roberta->Blenderbot, RoBERTa->Blenderbot
def _lowercase ( self : List[str] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # merge the lowest-ranked pair first; unranked pairs rank as infinity
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
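    # BPE walk-through (illustrative merge ranks, not Blenderbot's real table):
    # with bpe_ranks = {("h", "e"): 0, ("l", "l"): 1}, bpe("hello") proceeds
    #   ("h", "e", "l", "l", "o") -> ("he", "l", "l", "o") -> ("he", "ll", "o")
    # and returns "he ll o" once no remaining pair has a recorded rank.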
def _lowercase ( self : Optional[int] , __A : Any ):
snake_case__ : Any = []
for token in re.findall(self.pat , __A ):
snake_case__ : int = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : List[str] , __A : Tuple ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : str ):
return self.decoder.get(__A )
def _lowercase ( self : List[Any] , __A : Dict ):
snake_case__ : Dict = "".join(__A )
snake_case__ : Tuple = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : List[str] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : Union[str, Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : Optional[int] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : Tuple = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : Union[str, Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
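# Mask layout: 1 for the special tokens (<s> and </s>), 0 for sequence tokens.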
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[str] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : List[str] = [self.sep_token_id]
snake_case__ : Any = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : int , __A : Tuple , __A : Union[str, Any]=False , **__A : Any ):
snake_case__ : int = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : int = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : List[int] , __A : Optional[List[int]] = None ):
return token_ids_a + [self.eos_token_id]
def _lowercase ( self : Any , __A : "Conversation" ):
snake_case__ : Tuple = []
for is_user, text in conversation.iter_texts():
if is_user:
# We need to add a space prefix, as is done within Blenderbot
inputs.append(" " + text )
else:
# Generated responses already contain the space prefix.
inputs.append(__A )
snake_case__ : Optional[Any] = " ".join(__A )
snake_case__ : str = self.encode(__A )
if len(__A ) > self.model_max_length:
snake_case__ : Dict = input_ids[-self.model_max_length :]
logger.warning(f'''Trimmed input from conversation as it was longer than {self.model_max_length} tokens.''' )
return input_ids
| 25 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
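# Collect rows partition by partition in the requested order and build the expected (row_id, row_dict) pairs.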
snake_case__ : Optional[int] = []
for part_id in partition_order:
snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 )
snake_case__ : Any = Spark(snake_case_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 )
snake_case__ : Optional[Any] = [1, 0]
snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions.
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case__, snake_case__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[int] = spark.range(10 ).repartition(1 )
snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
snake_case__ : Union[str, Any] = lambda snake_case_ : snake_case_.reverse()
snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(100 ).repartition(1 )
snake_case__ : Union[str, Any] = Spark(snake_case_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 25 | 1 |
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
snake_case__ : Dict = 1_0
def _lowercase ( self : int ):
snake_case__ : str = [1, 2, 3, 4]
snake_case__ : Any = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(__A , self.block_size , 0 ) , __A )
def _lowercase ( self : Dict ):
snake_case__ : Optional[int] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
snake_case__ : Optional[Any] = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(__A , self.block_size , 0 ) , __A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : str = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0, 1_1, 1_2, 1_3]
snake_case__ : int = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1_0]
self.assertEqual(truncate_or_pad(__A , self.block_size , 0 ) , __A )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : List[str] = "It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this."
snake_case__, snake_case__ : Any = process_story(__A )
self.assertEqual(__A , [] )
def _lowercase ( self : List[str] ):
snake_case__ : Any = ""
snake_case__, snake_case__ : Tuple = process_story(__A )
self.assertEqual(__A , [] )
self.assertEqual(__A , [] )
def _lowercase ( self : Dict ):
snake_case__ : int = (
"It was the year of Our Lord one thousand seven hundred and "
"seventy-five\n\nSpiritual revelations were conceded to England "
"at that favoured period, as at this.\n@highlight\n\nIt was the best of times"
)
snake_case__, snake_case__ : Tuple = process_story(__A )
snake_case__ : Optional[Any] = [
"It was the year of Our Lord one thousand seven hundred and seventy-five.",
"Spiritual revelations were conceded to England at that favoured period, as at this.",
]
self.assertEqual(__A , __A )
snake_case__ : Optional[Any] = ["It was the best of times."]
self.assertEqual(__A , __A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Tuple = torch.tensor([1, 2, 3, 4] )
snake_case__ : Optional[Any] = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(__A , 0 ).numpy() , expected.numpy() )
def _lowercase ( self : List[str] ):
snake_case__ : Tuple = torch.tensor([1, 2, 3, 4, 2_3, 2_3, 2_3] )
snake_case__ : List[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__A , 2_3 ).numpy() , expected.numpy() )
def _lowercase ( self : List[Any] ):
snake_case__ : List[Any] = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
snake_case__ : Optional[Any] = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(__A , 1 ).numpy() , expected.numpy() )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = 1_0_1
snake_case__ : List[Any] = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 1_0_1, 5, 6], [1, 1_0_1, 3, 4, 1_0_1, 6]] )
snake_case__ : Union[str, Any] = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
snake_case__ : Union[str, Any] = compute_token_type_ids(__A , __A )
np.testing.assert_array_equal(__A , __A )
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
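# Import eagerly for static type checkers; at runtime the module is instead loaded lazily via _LazyModule.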
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 1 |
import warnings
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import TensorType, is_torch_available, logging
__lowerCamelCase : Optional[Any] = logging.get_logger(__name__)
__lowerCamelCase : Any = {
"""facebook/bart-large""": """https://huggingface.co/facebook/bart-large/resolve/main/config.json""",
# See all BART models at https://huggingface.co/models?filter=bart
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "bart"
a_ = ["past_key_values"]
a_ = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : int , __A : Dict=5_0_2_6_5 , __A : Dict=1_0_2_4 , __A : Tuple=1_2 , __A : Union[str, Any]=4_0_9_6 , __A : Tuple=1_6 , __A : Tuple=1_2 , __A : str=4_0_9_6 , __A : Union[str, Any]=1_6 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[str]="gelu" , __A : Optional[Any]=1_0_2_4 , __A : Any=0.1 , __A : Optional[int]=0.0 , __A : int=0.0 , __A : Tuple=0.0_2 , __A : str=0.0 , __A : Optional[Any]=False , __A : Optional[int]=True , __A : List[Any]=3 , __A : Union[str, Any]=1 , __A : Dict=0 , __A : int=2 , __A : int=True , __A : Optional[Any]=2 , __A : Tuple=2 , **__A : Any , ):
snake_case__ : Optional[int] = vocab_size
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = d_model
snake_case__ : Any = encoder_ffn_dim
snake_case__ : Optional[Any] = encoder_layers
snake_case__ : Any = encoder_attention_heads
snake_case__ : List[str] = decoder_ffn_dim
snake_case__ : Union[str, Any] = decoder_layers
snake_case__ : int = decoder_attention_heads
snake_case__ : Optional[int] = dropout
snake_case__ : Union[str, Any] = attention_dropout
snake_case__ : str = activation_dropout
snake_case__ : Optional[Any] = activation_function
snake_case__ : Any = init_std
snake_case__ : Optional[int] = encoder_layerdrop
snake_case__ : List[str] = decoder_layerdrop
snake_case__ : int = classifier_dropout
snake_case__ : Tuple = use_cache
snake_case__ : int = encoder_layers
snake_case__ : Optional[int] = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
num_labels=__A , pad_token_id=__A , bos_token_id=__A , eos_token_id=__A , is_encoder_decoder=__A , decoder_start_token_id=__A , forced_eos_token_id=__A , **__A , )
# ensure backward compatibility for BART CNN models
if self.forced_bos_token_id is None and kwargs.get("force_bos_token_to_be_generated" , __A ):
snake_case__ : Dict = self.bos_token_id
warnings.warn(
f'''Please make sure the config includes `forced_bos_token_id={self.bos_token_id}` in future versions. '''
"The config can simply be saved and uploaded again to be fixed." )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
@property
def _lowercase ( self : Optional[int] ):
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : List[Any] = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
snake_case__ : Optional[Any] = {0: "batch"}
snake_case__ : List[Any] = {0: "batch", 1: "past_decoder_sequence + sequence"}
else:
snake_case__ : Any = {0: "batch", 1: "decoder_sequence"}
snake_case__ : Union[str, Any] = {0: "batch", 1: "decoder_sequence"}
if self.use_past:
self.fill_with_past_key_values_(__A , direction="inputs" )
elif self.task == "causal-lm":
# TODO: figure this case out.
snake_case__ : Any = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
] )
if self.use_past:
snake_case__, snake_case__ : List[str] = self.num_layers
for i in range(__A ):
snake_case__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
snake_case__ : List[Any] = {0: "batch", 2: "past_sequence + sequence"}
else:
snake_case__ : int = OrderedDict(
[
("input_ids", {0: "batch", 1: "encoder_sequence"}),
("attention_mask", {0: "batch", 1: "encoder_sequence"}),
("decoder_input_ids", {0: "batch", 1: "decoder_sequence"}),
("decoder_attention_mask", {0: "batch", 1: "decoder_sequence"}),
] )
return common_inputs
@property
def _lowercase ( self : int ):
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : List[Any] = super().outputs
else:
snake_case__ : str = super(__A , self ).outputs
if self.use_past:
snake_case__, snake_case__ : Dict = self.num_layers
for i in range(__A ):
snake_case__ : Optional[Any] = {0: "batch", 2: "past_sequence + sequence"}
snake_case__ : Tuple = {0: "batch", 2: "past_sequence + sequence"}
return common_outputs
def _lowercase ( self : Any , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ):
snake_case__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A , __A , __A , __A , __A )
# Generate decoder inputs
snake_case__ : Any = seq_length if not self.use_past else 1
snake_case__ : Union[str, Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A , __A , __A , __A , __A )
snake_case__ : List[str] = {f'''decoder_{name}''': tensor for name, tensor in decoder_inputs.items()}
snake_case__ : str = dict(**__A , **__A )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case__, snake_case__ : List[Any] = common_inputs["input_ids"].shape
snake_case__ : Tuple = common_inputs["decoder_input_ids"].shape[1]
snake_case__, snake_case__ : Dict = self.num_attention_heads
snake_case__ : Dict = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case__ : Optional[Any] = decoder_seq_length + 3
snake_case__ : List[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
snake_case__ : Dict = torch.cat(
[common_inputs["decoder_attention_mask"], torch.ones(__A , __A )] , dim=1 )
snake_case__ : Optional[int] = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
snake_case__, snake_case__ : Tuple = self.num_layers
snake_case__ : List[str] = min(__A , __A )
snake_case__ : Any = max(__A , __A ) - min_num_layers
snake_case__ : Optional[Any] = "encoder" if num_encoder_layers > num_decoder_layers else "decoder"
for _ in range(__A ):
common_inputs["past_key_values"].append(
(
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
torch.zeros(__A ),
) )
# TODO: test this.
snake_case__ : Any = encoder_shape if remaining_side_name == "encoder" else decoder_shape
for _ in range(__A , __A ):
common_inputs["past_key_values"].append((torch.zeros(__A ), torch.zeros(__A )) )
return common_inputs
def _lowercase ( self : List[Any] , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ):
snake_case__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A , __A , __A , __A , __A )
if self.use_past:
if not is_torch_available():
raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed." )
else:
import torch
snake_case__, snake_case__ : Union[str, Any] = common_inputs["input_ids"].shape
# Not using the same length for past_key_values
snake_case__ : Any = seqlen + 2
snake_case__, snake_case__ : Union[str, Any] = self.num_layers
snake_case__, snake_case__ : List[str] = self.num_attention_heads
snake_case__ : List[Any] = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
snake_case__ : List[str] = common_inputs["attention_mask"].dtype
snake_case__ : Optional[int] = torch.cat(
[common_inputs["attention_mask"], torch.ones(__A , __A , dtype=__A )] , dim=1 )
snake_case__ : Optional[Any] = [
(torch.zeros(__A ), torch.zeros(__A )) for _ in range(__A )
]
return common_inputs
def _lowercase ( self : str , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ):
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
snake_case__ : Any = compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
snake_case__ : int = tokenizer.num_special_tokens_to_add(__A )
snake_case__ : Tuple = compute_effective_axis_dimension(
__A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=__A )
# Generate dummy inputs according to compute batch and sequence
snake_case__ : List[str] = [" ".join([tokenizer.unk_token] ) * seq_length] * batch_size
snake_case__ : Union[str, Any] = dict(tokenizer(__A , return_tensors=__A ) )
return common_inputs
def _lowercase ( self : Tuple , __A : PreTrainedTokenizer , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional[TensorType] = None , ):
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : Dict = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
elif self.task == "causal-lm":
snake_case__ : Optional[Any] = self._generate_dummy_inputs_for_causal_lm(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
else:
snake_case__ : Any = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
__A , batch_size=__A , seq_length=__A , is_pair=__A , framework=__A )
return common_inputs
def _lowercase ( self : int , __A : Dict , __A : Dict , __A : Any , __A : Optional[int] ):
if self.task in ["default", "seq2seq-lm"]:
snake_case__ : Union[str, Any] = super()._flatten_past_key_values_(__A , __A , __A , __A )
else:
snake_case__ : Union[str, Any] = super(__A , self )._flatten_past_key_values_(
__A , __A , __A , __A )
| 25 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 25 | 1 |
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
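# An automorphic number is one whose square ends with the number itself (e.g. 5 -> 25, 76 -> 5776).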
if not isinstance(snake_case_ , snake_case_ ):
snake_case__ : int = F'''Input value of [number={number}] must be an integer'''
raise TypeError(snake_case_ )
if number < 0:
return False
snake_case__ : List[str] = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def SCREAMING_SNAKE_CASE ( snake_case_ : dict ):
return (data["data"], data["target"])
def SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ):
snake_case__ : Optional[int] = XGBClassifier()
classifier.fit(snake_case_ , snake_case_ )
return classifier
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = load_iris()
snake_case__, snake_case__ : str = data_handling(snake_case_ )
snake_case__, snake_case__, snake_case__, snake_case__ : int = train_test_split(
snake_case_ , snake_case_ , test_size=0.25 )
snake_case__ : Dict = iris["target_names"]
# Create an XGBoost Classifier from the training data
snake_case__ : Dict = xgboost(snake_case_ , snake_case_ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25 | 1 |
import gc
import threading
import time
import psutil
import torch
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : str ):
snake_case__ : List[str] = psutil.Process()
snake_case__ : int = False
def _lowercase ( self : int ):
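# Busy-poll the process RSS and keep the maximum seen until peak_monitoring is cleared.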
snake_case__ : List[str] = -1
while True:
snake_case__ : List[Any] = max(self.process.memory_info().rss , self.cpu_memory_peak )
# can't sleep or will not catch the peak right (this comment is here on purpose)
if not self.peak_monitoring:
break
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = True
snake_case__ : int = threading.Thread(target=self.peak_monitor )
snake_case__ : Dict = True
self.thread.start()
def _lowercase ( self : Optional[int] ):
snake_case__ : str = False
self.thread.join()
return self.cpu_memory_peak
__lowerCamelCase : Dict = PeakCPUMemory()
def SCREAMING_SNAKE_CASE ( ):
# Time
snake_case__ : int = {"time": time.time()}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case__ : List[str] = psutil.Process().memory_info().rss
cpu_peak_tracker.start()
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case__ : Dict = torch.cuda.memory_allocated(snake_case_ )
torch.cuda.reset_peak_memory_stats()
return measures
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# Time
snake_case__ : Tuple = {"time": time.time() - start_measures["time"]}
gc.collect()
torch.cuda.empty_cache()
# CPU mem
snake_case__ : int = (psutil.Process().memory_info().rss - start_measures["cpu"]) / 2**20
snake_case__ : Dict = (cpu_peak_tracker.stop() - start_measures["cpu"]) / 2**20
# GPU mem
for i in range(torch.cuda.device_count() ):
snake_case__ : Dict = (torch.cuda.memory_allocated(snake_case_ ) - start_measures[str(snake_case_ )]) / 2**20
snake_case__ : Optional[Any] = (torch.cuda.max_memory_allocated(snake_case_ ) - start_measures[str(snake_case_ )]) / 2**20
return measures
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Optional[int] ):
print(F'''{description}:''' )
print(F'''- Time: {measures['time']:.2f}s''' )
for i in range(torch.cuda.device_count() ):
print(F'''- GPU {i} allocated: {measures[str(snake_case_ )]:.2f}MiB''' )
snake_case__ : Tuple = measures[F'''{i}-peak''']
print(F'''- GPU {i} peak: {peak:.2f}MiB''' )
print(F'''- CPU RAM allocated: {measures['cpu']:.2f}MiB''' )
print(F'''- CPU RAM peak: {measures['cpu-peak']:.2f}MiB''' )
| 25 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ):
snake_case__ : Tuple = args.log_outputs
snake_case__ : Union[str, Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
snake_case__ : List[str] = load_metric("wer" )
snake_case__ : List[str] = load_metric("cer" )
# compute metrics
snake_case__ : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
snake_case__ : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
snake_case__ : Dict = F'''WER: {wer_result}\nCER: {cer_result}'''
print(snake_case_ )
with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f:
f.write(snake_case_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
snake_case__ : Union[str, Any] = F'''log_{dataset_id}_predictions.txt'''
snake_case__ : int = F'''log_{dataset_id}_targets.txt'''
with open(snake_case_ , "w" ) as p, open(snake_case_ , "w" ) as t:
# mapping function to write output
def write_to_file(snake_case_ : Union[str, Any] , snake_case_ : Any ):
p.write(F'''{i}''' + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(F'''{i}''' + "\n" )
t.write(batch["target"] + "\n" )
result.map(snake_case_ , with_indices=snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
snake_case__ : Optional[int] = re.sub(snake_case_ , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
snake_case__ : Optional[Any] = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
snake_case__ : Optional[int] = " ".join(text.split(snake_case_ ) )
return text
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# load dataset
snake_case__ : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
snake_case__ : List[str] = AutoFeatureExtractor.from_pretrained(args.model_id )
snake_case__ : List[Any] = feature_extractor.sampling_rate
# resample audio
snake_case__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case_ ) )
# load eval pipeline
if args.device is None:
snake_case__ : int = 0 if torch.cuda.is_available() else -1
snake_case__ : List[str] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case_ : Any ):
snake_case__ : Union[str, Any] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
snake_case__ : Optional[int] = prediction["text"]
snake_case__ : Optional[Any] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
snake_case__ : Any = dataset.map(snake_case_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case_ , snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__lowerCamelCase : str = parser.parse_args()
main(args)
| 25 | 1 |
__lowerCamelCase : Tuple = """ABCDEFGHIJKLMNOPQRSTUVWXYZ"""
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[Any] = input("Enter message: " )
snake_case__ : Any = input("Enter key [alphanumeric]: " )
snake_case__ : Optional[int] = input("Encrypt/Decrypt [e/d]: " )
if mode.lower().startswith("e" ):
snake_case__ : Any = "encrypt"
snake_case__ : List[Any] = encrypt_message(snake_case_ , snake_case_ )
elif mode.lower().startswith("d" ):
snake_case__ : Dict = "decrypt"
snake_case__ : List[str] = decrypt_message(snake_case_ , snake_case_ )
print(F'''\n{mode.title()}ed message:''' )
print(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str ):
return translate_message(snake_case_ , snake_case_ , "encrypt" )
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str ):
return translate_message(snake_case_ , snake_case_ , "decrypt" )
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : str ):
snake_case__ : Optional[int] = []
snake_case__ : List[str] = 0
snake_case__ : Union[str, Any] = key.upper()
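# Shift each letter by the matching key letter; the key index advances (and wraps) only on letters.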
for symbol in message:
snake_case__ : Any = LETTERS.find(symbol.upper() )
if num != -1:
if mode == "encrypt":
num += LETTERS.find(key[key_index] )
elif mode == "decrypt":
num -= LETTERS.find(key[key_index] )
num %= len(snake_case_ )
if symbol.isupper():
translated.append(LETTERS[num] )
elif symbol.islower():
translated.append(LETTERS[num].lower() )
key_index += 1
if key_index == len(snake_case_ ):
snake_case__ : Optional[Any] = 0
else:
translated.append(snake_case_ )
return "".join(snake_case_ )
if __name__ == "__main__":
main()
| 25 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a_ = Features({"text": Value("string" )} )
a_ = Features({"labels": ClassLabel} )
a_ = "text"
a_ = "labels"
def _lowercase ( self : Tuple , __A : List[Any] ):
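# Validate the label column and copy its ClassLabel schema into this template's label_schema.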
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
snake_case__ : Any = copy.deepcopy(self )
snake_case__ : Optional[Any] = self.label_schema.copy()
snake_case__ : List[str] = features[self.label_column]
snake_case__ : Dict = label_schema
return task_template
@property
def _lowercase ( self : Tuple ):
return {
self.text_column: "text",
self.label_column: "labels",
}
| 25 | 1 |
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""microsoft/beit-base-patch16-224-pt22k""": (
"""https://huggingface.co/microsoft/beit-base-patch16-224-pt22k/resolve/main/config.json"""
),
# See all BEiT models at https://huggingface.co/models?filter=beit
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "beit"
def __init__( self : int , __A : str=8_1_9_2 , __A : List[Any]=7_6_8 , __A : Dict=1_2 , __A : List[Any]=1_2 , __A : Any=3_0_7_2 , __A : Optional[Any]="gelu" , __A : str=0.0 , __A : Tuple=0.0 , __A : Any=0.0_2 , __A : List[Any]=1e-1_2 , __A : Optional[Any]=2_2_4 , __A : Tuple=1_6 , __A : Union[str, Any]=3 , __A : int=False , __A : Any=False , __A : str=False , __A : Union[str, Any]=False , __A : Optional[int]=0.1 , __A : Any=0.1 , __A : List[Any]=True , __A : int=[3, 5, 7, 1_1] , __A : Optional[Any]=[1, 2, 3, 6] , __A : Optional[Any]=True , __A : str=0.4 , __A : str=2_5_6 , __A : Dict=1 , __A : Any=False , __A : Optional[Any]=2_5_5 , **__A : Union[str, Any] , ):
super().__init__(**__A )
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : Any = num_hidden_layers
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Optional[int] = intermediate_size
snake_case__ : int = hidden_act
snake_case__ : List[Any] = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : int = initializer_range
snake_case__ : Any = layer_norm_eps
snake_case__ : int = image_size
snake_case__ : Union[str, Any] = patch_size
snake_case__ : str = num_channels
snake_case__ : Dict = use_mask_token
snake_case__ : str = use_absolute_position_embeddings
snake_case__ : List[Any] = use_relative_position_bias
snake_case__ : List[str] = use_shared_relative_position_bias
snake_case__ : int = layer_scale_init_value
snake_case__ : Union[str, Any] = drop_path_rate
snake_case__ : Dict = use_mean_pooling
# decode head attributes (semantic segmentation)
snake_case__ : Optional[int] = out_indices
snake_case__ : Union[str, Any] = pool_scales
# auxiliary head attributes (semantic segmentation)
snake_case__ : str = use_auxiliary_head
snake_case__ : Optional[Any] = auxiliary_loss_weight
snake_case__ : List[Any] = auxiliary_channels
snake_case__ : Any = auxiliary_num_convs
snake_case__ : Optional[Any] = auxiliary_concat_input
snake_case__ : Union[str, Any] = semantic_loss_ignore_index
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = version.parse("1.11" )
@property
def _lowercase ( self : Union[str, Any] ):
return OrderedDict(
[
("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
] )
@property
def _lowercase ( self : Union[str, Any] ):
return 1e-4
| 25 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip_vision_model"
def __init__( self : List[Any] , __A : Dict=1_4_0_8 , __A : Tuple=6_1_4_4 , __A : str=3_9 , __A : int=1_6 , __A : str=2_2_4 , __A : Any=1_4 , __A : Dict="gelu" , __A : List[Any]=1e-6 , __A : Any=0.0 , __A : List[Any]=1e-1_0 , __A : Union[str, Any]=True , **__A : Tuple , ):
super().__init__(**__A )
snake_case__ : List[str] = hidden_size
snake_case__ : Optional[int] = intermediate_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : str = patch_size
snake_case__ : int = image_size
snake_case__ : int = initializer_range
snake_case__ : Optional[int] = attention_dropout
snake_case__ : str = layer_norm_eps
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Tuple = qkv_bias
@classmethod
def _lowercase ( cls : List[str] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : str = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : Union[str, Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip_qformer"
def __init__( self : Any , __A : Union[str, Any]=3_0_5_2_2 , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=1_2 , __A : Dict=3_0_7_2 , __A : List[str]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_2 , __A : Optional[int]=0.0_2 , __A : List[str]=1e-1_2 , __A : Any=0 , __A : Optional[Any]="absolute" , __A : str=2 , __A : Any=1_4_0_8 , **__A : List[str] , ):
super().__init__(pad_token_id=__A , **__A )
snake_case__ : Dict = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = hidden_act
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : int = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : Dict = cross_attention_frequency
snake_case__ : List[str] = encoder_hidden_size
@classmethod
def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Optional[int] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : Tuple = cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : List[Any] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip"
a_ = True
def __init__( self : List[str] , __A : Optional[Any]=None , __A : Tuple=None , __A : Optional[int]=None , __A : Optional[Any]=3_2 , **__A : Optional[int] ):
super().__init__(**__A )
if vision_config is None:
snake_case__ : Any = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
snake_case__ : Optional[Any] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
snake_case__ : Optional[int] = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
snake_case__ : List[Any] = InstructBlipVisionConfig(**__A )
snake_case__ : Union[str, Any] = InstructBlipQFormerConfig(**__A )
snake_case__ : Dict = text_config["model_type"] if "model_type" in text_config else "opt"
snake_case__ : List[Any] = CONFIG_MAPPING[text_model_type](**__A )
snake_case__ : Union[str, Any] = self.text_config.tie_word_embeddings
snake_case__ : Tuple = self.text_config.is_encoder_decoder
snake_case__ : str = num_query_tokens
snake_case__ : Dict = self.vision_config.hidden_size
snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case__ : int = 1.0
snake_case__ : Optional[int] = 0.0_2
@classmethod
def _lowercase ( cls : List[str] , __A : InstructBlipVisionConfig , __A : InstructBlipQFormerConfig , __A : PretrainedConfig , **__A : int , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = copy.deepcopy(self.__dict__ )
snake_case__ : Optional[Any] = self.vision_config.to_dict()
snake_case__ : List[str] = self.qformer_config.to_dict()
snake_case__ : List[Any] = self.text_config.to_dict()
snake_case__ : List[Any] = self.__class__.model_type
return output
| 25 | 1 |
import unittest
import numpy as np
import torch
from diffusers import KarrasVePipeline, KarrasVeScheduler, UNetaDModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@property
def _lowercase ( self : str ):
torch.manual_seed(0 )
snake_case__ : Dict = UNetaDModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=3 , out_channels=3 , down_block_types=("DownBlock2D", "AttnDownBlock2D") , up_block_types=("AttnUpBlock2D", "UpBlock2D") , )
return model
def _lowercase ( self : Tuple ):
snake_case__ : Union[str, Any] = self.dummy_uncond_unet
snake_case__ : Any = KarrasVeScheduler()
snake_case__ : int = KarrasVePipeline(unet=__A , scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : List[Any] = torch.manual_seed(0 )
snake_case__ : str = pipe(num_inference_steps=2 , generator=__A , output_type="numpy" ).images
snake_case__ : Union[str, Any] = torch.manual_seed(0 )
snake_case__ : int = pipe(num_inference_steps=2 , generator=__A , output_type="numpy" , return_dict=__A )[0]
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : str = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : Tuple = np.array([0.0, 1.0, 0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 0.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
@slow
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = "google/ncsnpp-celebahq-256"
snake_case__ : List[str] = UNetaDModel.from_pretrained(__A )
snake_case__ : int = KarrasVeScheduler()
snake_case__ : Any = KarrasVePipeline(unet=__A , scheduler=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Tuple = pipe(num_inference_steps=2_0 , generator=__A , output_type="numpy" ).images
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_5_6, 2_5_6, 3)
snake_case__ : Any = np.array([0.5_7_8, 0.5_8_1_1, 0.5_9_2_4, 0.5_8_0_9, 0.5_8_7, 0.5_8_8_6, 0.5_8_6_1, 0.5_8_0_2, 0.5_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 25 |
def SCREAMING_SNAKE_CASE ( snake_case_ : list ):
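# Gnome sort: walk forward while adjacent items are in order; swap and step back otherwise.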
if len(snake_case_ ) <= 1:
return lst
snake_case__ : List[Any] = 1
while i < len(snake_case_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
snake_case__, snake_case__ : Tuple = lst[i], lst[i - 1]
i -= 1
if i == 0:
snake_case__ : Union[str, Any] = 1
return lst
if __name__ == "__main__":
__lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 25 | 1 |
import inspect
import os
import sys
import unittest
import accelerate
from accelerate.test_utils import execute_subprocess_async, require_tpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : str ):
snake_case__ : str = inspect.getfile(accelerate.test_utils )
snake_case__ : Optional[Any] = os.path.sep.join(mod_file.split(os.path.sep )[:-1] + ["scripts", "test_script.py"] )
snake_case__ : List[str] = os.path.sep.join(inspect.getfile(self.__class__ ).split(os.path.sep )[:-1] )
@require_tpu
def _lowercase ( self : int ):
snake_case__ : Any = f'''
{self.test_dir}/xla_spawn.py
--num_cores 8
{self.test_file_path}
'''.split()
snake_case__ : Tuple = [sys.executable] + distributed_args
execute_subprocess_async(__A , env=os.environ.copy() )
| 25 |
from __future__ import annotations
import time
__lowerCamelCase : str = list[tuple[int, int]]
__lowerCamelCase : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : int , __A : int , __A : int , __A : int , __A : Node | None ):
snake_case__ : Optional[int] = pos_x
snake_case__ : Dict = pos_y
snake_case__ : int = (pos_y, pos_x)
snake_case__ : Optional[int] = goal_x
snake_case__ : Tuple = goal_y
snake_case__ : str = parent
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : tuple[int, int] , __A : tuple[int, int] ):
snake_case__ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , __A )
snake_case__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __A )
snake_case__ : int = [self.start]
snake_case__ : Union[str, Any] = False
def _lowercase ( self : Dict ):
while self.node_queue:
snake_case__ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case__ : Optional[Any] = True
return self.retrace_path(__A )
snake_case__ : int = self.get_successors(__A )
for node in successors:
self.node_queue.append(__A )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Union[str, Any] , __A : Node ):
snake_case__ : str = []
for action in delta:
snake_case__ : str = parent.pos_x + action[1]
snake_case__ : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__A , __A , self.target.pos_y , self.target.pos_x , __A ) )
return successors
def _lowercase ( self : Optional[Any] , __A : Node | None ):
snake_case__ : Tuple = node
snake_case__ : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case__ : Tuple = current_node.parent
path.reverse()
return path
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Dict , __A : str , __A : int ):
snake_case__ : str = BreadthFirstSearch(__A , __A )
snake_case__ : int = BreadthFirstSearch(__A , __A )
snake_case__ : Tuple = False
def _lowercase ( self : Optional[Any] ):
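# Expand one node from each frontier per iteration; a shared position means the two searches have met.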
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case__ : Any = self.fwd_bfs.node_queue.pop(0 )
snake_case__ : List[str] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case__ : List[str] = True
return self.retrace_bidirectional_path(
__A , __A )
snake_case__ : Union[str, Any] = current_bwd_node
snake_case__ : Dict = current_fwd_node
snake_case__ : List[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(__A ),
self.bwd_bfs: self.bwd_bfs.get_successors(__A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Any , __A : Node , __A : Node ):
snake_case__ : List[str] = self.fwd_bfs.retrace_path(__A )
snake_case__ : Optional[Any] = self.bwd_bfs.retrace_path(__A )
bwd_path.pop()
bwd_path.reverse()
snake_case__ : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowerCamelCase : str = (0, 0)
__lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase : Any = time.time()
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal)
__lowerCamelCase : str = bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
__lowerCamelCase : Optional[Any] = time.time()
__lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
__lowerCamelCase : str = bd_bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 25 | 1 |
import string
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
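# Mirror each letter through the alphabet via ordinal arithmetic: 155 = ord('A') + ord('Z'), 219 = ord('a') + ord('z').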
snake_case__ : Union[str, Any] = ""
for i in sequence:
snake_case__ : int = ord(snake_case_ )
if 65 <= extract <= 90:
output += chr(155 - extract )
elif 97 <= extract <= 122:
output += chr(219 - extract )
else:
output += i
return output
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : str = string.ascii_letters
snake_case__ : Optional[int] = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
return "".join(
letters_reversed[letters.index(snake_case_ )] if c in letters else c for c in sequence )
def SCREAMING_SNAKE_CASE ( ):
from timeit import timeit
print("Running performance benchmarks..." )
snake_case__ : str = "from string import printable ; from __main__ import atbash, atbash_slow"
print(F'''> atbash_slow(): {timeit('atbash_slow(printable)' , setup=snake_case_ )} seconds''' )
print(F'''> atbash(): {timeit('atbash(printable)' , setup=snake_case_ )} seconds''' )
if __name__ == "__main__":
for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
print(f"{example} encrypted in atbash: {atbash(example)}")
benchmark()
| 25 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : str = min_resolution
snake_case__ : Tuple = max_resolution
snake_case__ : List[Any] = do_resize
snake_case__ : Dict = size
snake_case__ : List[str] = do_normalize
snake_case__ : Optional[int] = image_mean
snake_case__ : Optional[int] = image_std
snake_case__ : Any = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : int = do_pad
def _lowercase ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ):
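# Recreate the shortest-edge resize logic to predict the (height, width) the processor should output.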
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : Tuple = image.size
else:
snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case__ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case__ : List[Any] = self.size["shortest_edge"]
snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Dict = self.size["shortest_edge"]
snake_case__ : Dict = self.size["shortest_edge"]
else:
snake_case__ : str = []
for image in image_inputs:
snake_case__, snake_case__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Dict = max(__A , key=lambda __A : __A[0] )[0]
snake_case__ : Tuple = max(__A , key=lambda __A : __A[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : int ):
snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self )
@property
def _lowercase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Any ):
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : List[str] ):
snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : List[Any] ):
# prepare image and target
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : str ):
# prepare image, target and masks_path
snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 1 |
from ....configuration_utils import PretrainedConfig
from ....utils import logging
__lowerCamelCase : Tuple = logging.get_logger(__name__)
__lowerCamelCase : Any = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "van"
def __init__( self : Union[str, Any] , __A : Dict=2_2_4 , __A : Tuple=3 , __A : Optional[int]=[7, 3, 3, 3] , __A : Optional[Any]=[4, 2, 2, 2] , __A : Dict=[6_4, 1_2_8, 3_2_0, 5_1_2] , __A : Tuple=[3, 3, 1_2, 3] , __A : Optional[Any]=[8, 8, 4, 4] , __A : Any="gelu" , __A : Any=0.0_2 , __A : List[Any]=1e-6 , __A : Optional[int]=1e-2 , __A : str=0.0 , __A : str=0.0 , **__A : List[Any] , ):
super().__init__(**__A )
snake_case__ : List[str] = image_size
snake_case__ : Optional[Any] = num_channels
snake_case__ : int = patch_sizes
snake_case__ : Dict = strides
snake_case__ : Tuple = hidden_sizes
snake_case__ : Optional[Any] = depths
snake_case__ : str = mlp_ratios
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : Any = initializer_range
snake_case__ : List[str] = layer_norm_eps
snake_case__ : Dict = layer_scale_init_value
snake_case__ : Any = drop_path_rate
snake_case__ : List[Any] = dropout_rate
| 25 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCamelCase : str = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each prediction
should be a string with tokens separated by spaces.
references: list of references, one for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ):
snake_case__ : List[Any] = compute_mauve(
p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , )
return out
| 25 | 1 |
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] ):
snake_case__ : Dict = 0
while b > 0:
if b & 1:
res += a
a += a
b >>= 1
return res
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : str , snake_case_ : Optional[int] ):
snake_case__ : Optional[int] = 0
while b > 0:
if b & 1:
snake_case__ : Optional[Any] = ((res % c) + (a % c)) % c
a += a
b >>= 1
return res
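# A minimal, readable sketch of the same shift-and-add ("Russian peasant")
# technique. Note that the two mangled helpers above share the name
# SCREAMING_SNAKE_CASE, so the second definition shadows the first at import
# time. The names binary_multiply/binary_mod_multiply below are illustrative
# stand-ins, not names from the original module; the modular variant also keeps
# `a` reduced each round, a common refinement over the version above.
def binary_multiply(a: int, b: int) -> int:
    result = 0
    while b > 0:
        if b & 1:  # low bit of b set: this doubling of a contributes
            result += a
        a += a  # double a
        b >>= 1  # halve b
    return result


def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    result = 0
    while b > 0:
        if b & 1:
            result = (result + a) % modulus  # keep the partial sum reduced
        a = (a + a) % modulus
        b >>= 1
    return result


assert binary_multiply(12, 34) == 408
assert binary_mod_multiply(12, 34, 7) == 408 % 7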
| 25 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
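# In the upstream source the mangled assignments below patch the re-exported
# names onto the deprecated modules (e.g. _arrow_dataset.concatenate_datasets =
# concatenate_datasets) so old import paths keep working; the `del` afterwards
# only drops the local aliases.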
__lowerCamelCase : List[Any] = concatenate_datasets
__lowerCamelCase : List[str] = DownloadConfig
__lowerCamelCase : Union[str, Any] = DownloadManager
__lowerCamelCase : str = DownloadMode
__lowerCamelCase : Union[str, Any] = DownloadConfig
__lowerCamelCase : List[str] = DownloadMode
__lowerCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 25 | 1 |
from .constants import (
MODEL_NAME,
OPTIMIZER_NAME,
RNG_STATE_NAME,
SAFE_WEIGHTS_INDEX_NAME,
SAFE_WEIGHTS_NAME,
SCALER_NAME,
SCHEDULER_NAME,
TORCH_LAUNCH_PARAMS,
WEIGHTS_INDEX_NAME,
WEIGHTS_NAME,
)
from .dataclasses import (
BnbQuantizationConfig,
ComputeEnvironment,
CustomDtype,
DeepSpeedPlugin,
DistributedDataParallelKwargs,
DistributedType,
DynamoBackend,
FPaRecipeKwargs,
FullyShardedDataParallelPlugin,
GradientAccumulationPlugin,
GradScalerKwargs,
InitProcessGroupKwargs,
KwargsHandler,
LoggerType,
MegatronLMPlugin,
PrecisionType,
ProjectConfiguration,
RNGType,
SageMakerDistributedType,
TensorInformation,
TorchDynamoPlugin,
)
from .environment import get_int_from_env, parse_choice_from_env, parse_flag_from_env
from .imports import (
get_ccl_version,
is_abit_bnb_available,
is_aim_available,
is_bfaa_available,
is_bnb_available,
is_botoa_available,
is_ccl_available,
is_comet_ml_available,
is_datasets_available,
is_deepspeed_available,
is_fpa_available,
is_ipex_available,
is_megatron_lm_available,
is_mlflow_available,
is_mps_available,
is_npu_available,
is_rich_available,
is_safetensors_available,
is_sagemaker_available,
is_tensorboard_available,
is_tpu_available,
is_transformers_available,
is_wandb_available,
is_xpu_available,
)
from .modeling import (
check_device_map,
check_tied_parameters_in_config,
check_tied_parameters_on_same_device,
compute_module_sizes,
convert_file_size_to_int,
dtype_byte_size,
find_tied_parameters,
get_balanced_memory,
get_max_layer_size,
get_max_memory,
get_mixed_precision_context_manager,
id_tensor_storage,
infer_auto_device_map,
load_checkpoint_in_model,
load_offloaded_weights,
load_state_dict,
named_module_tensors,
retie_parameters,
set_module_tensor_to_device,
shard_checkpoint,
)
from .offload import (
OffloadedWeightsLoader,
PrefixedDataset,
extract_submodules_state_dict,
load_offloaded_weight,
offload_state_dict,
offload_weight,
save_offload_index,
)
from .operations import (
broadcast,
broadcast_object_list,
concatenate,
convert_outputs_to_fpaa,
convert_to_fpaa,
find_batch_size,
find_device,
gather,
gather_object,
get_data_structure,
honor_type,
initialize_tensors,
is_namedtuple,
is_tensor_information,
is_torch_tensor,
listify,
pad_across_processes,
recursively_apply,
reduce,
send_to_device,
slice_tensors,
)
from .versions import compare_versions, is_torch_version
if is_deepspeed_available():
from .deepspeed import (
DeepSpeedEngineWrapper,
DeepSpeedOptimizerWrapper,
DeepSpeedSchedulerWrapper,
DummyOptim,
DummyScheduler,
HfDeepSpeedConfig,
)
from .bnb import has_abit_bnb_layers, load_and_quantize_model
from .fsdp_utils import load_fsdp_model, load_fsdp_optimizer, save_fsdp_model, save_fsdp_optimizer
from .launch import (
PrepareForLaunch,
_filter_args,
prepare_deepspeed_cmd_env,
prepare_multi_gpu_env,
prepare_sagemaker_args_inputs,
prepare_simple_launcher_cmd_env,
prepare_tpu,
)
from .megatron_lm import (
AbstractTrainStep,
BertTrainStep,
GPTTrainStep,
MegatronEngine,
MegatronLMDummyDataLoader,
MegatronLMDummyScheduler,
MegatronLMOptimizerWrapper,
MegatronLMSchedulerWrapper,
TaTrainStep,
avg_losses_across_data_parallel_group,
gather_across_data_parallel_groups,
)
from .megatron_lm import initialize as megatron_lm_initialize
from .megatron_lm import prepare_data_loader as megatron_lm_prepare_data_loader
from .megatron_lm import prepare_model as megatron_lm_prepare_model
from .megatron_lm import prepare_optimizer as megatron_lm_prepare_optimizer
from .megatron_lm import prepare_scheduler as megatron_lm_prepare_scheduler
from .memory import find_executable_batch_size, release_memory
from .other import (
extract_model_from_parallel,
get_pretty_name,
is_port_in_use,
merge_dicts,
patch_environment,
save,
wait_for_everyone,
write_basic_config,
)
from .random import set_seed, synchronize_rng_state, synchronize_rng_states
from .torch_xla import install_xla
from .tqdm import tqdm
from .transformer_engine import convert_model, has_transformer_engine_layers
| 25 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : str = [True] * limit
snake_case__ : str = False
snake_case__ : str = False
snake_case__ : str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
snake_case__ : Optional[Any] = i * 2
while index < limit:
snake_case__ : Union[str, Any] = False
snake_case__ : Any = index + i
snake_case__ : Optional[Any] = [2]
for i in range(3 , snake_case_ , 2 ):
if is_prime[i]:
primes.append(snake_case_ )
return primes
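# The sieve above crosses out multiples of every odd i up to sqrt(limit)
# (without first checking that i is prime, which is redundant but harmless),
# seeds 2 by hand, and then collects the surviving odd indices.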
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ):
snake_case__ : Optional[int] = prime_sieve(snake_case_ )
snake_case__ : List[Any] = 0
snake_case__ : List[str] = 0
for i in range(len(snake_case_ ) ):
for j in range(i + length , len(snake_case_ ) ):
snake_case__ : Dict = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
snake_case__ : Tuple = j - i
snake_case__ : str = sol
return largest
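# The nested search recomputes sum(primes[i:j]) for each window, breaks once
# the running total reaches the ceiling, and starts j at i + length so only
# windows no shorter than the current best are ever examined.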
if __name__ == "__main__":
print(f"{solution() = }")
| 25 | 1 |
import argparse
import struct
import unittest
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Any , __A : bytes ):
snake_case__ : Optional[Any] = data
# Initialize hash values
snake_case__ : Union[str, Any] = [
0x6A09E667,
0xBB67AE85,
0x3C6EF372,
0xA54FF53A,
0x510E527F,
0x9B05688C,
0x1F83D9AB,
0x5BE0CD19,
]
# Initialize round constants
snake_case__ : List[str] = [
0x428A2F98,
0x71374491,
0xB5C0FBCF,
0xE9B5DBA5,
0x3956C25B,
0x59F111F1,
0x923F82A4,
0xAB1C5ED5,
0xD807AA98,
0x12835B01,
0x243185BE,
0x550C7DC3,
0x72BE5D74,
0x80DEB1FE,
0x9BDC06A7,
0xC19BF174,
0xE49B69C1,
0xEFBE4786,
0x0FC19DC6,
0x240CA1CC,
0x2DE92C6F,
0x4A7484AA,
0x5CB0A9DC,
0x76F988DA,
0x983E5152,
0xA831C66D,
0xB00327C8,
0xBF597FC7,
0xC6E00BF3,
0xD5A79147,
0x06CA6351,
0x14292967,
0x27B70A85,
0x2E1B2138,
0x4D2C6DFC,
0x53380D13,
0x650A7354,
0x766A0ABB,
0x81C2C92E,
0x92722C85,
0xA2BFE8A1,
0xA81A664B,
0xC24B8B70,
0xC76C51A3,
0xD192E819,
0xD6990624,
0xF40E3585,
0x106AA070,
0x19A4C116,
0x1E376C08,
0x2748774C,
0x34B0BCB5,
0x391C0CB3,
0x4ED8AA4A,
0x5B9CCA4F,
0x682E6FF3,
0x748F82EE,
0x78A5636F,
0x84C87814,
0x8CC70208,
0x90BEFFFA,
0xA4506CEB,
0xBEF9A3F7,
0xC67178F2,
]
snake_case__ : Tuple = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def _lowercase ( __A : bytes ):
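# SHA-256 padding: append the 0x80 marker byte, pad with zero bytes until the
# length is congruent to 56 mod 64, then append the original length in bits as
# a big-endian 64-bit integer, so the padded message splits into 64-byte blocks.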
snake_case__ : Optional[int] = B"\x80" + (B"\x00" * (6_3 - (len(__A ) + 8) % 6_4))
snake_case__ : List[str] = struct.pack(">Q" , (len(__A ) * 8) )
return data + padding + big_endian_integer
def _lowercase ( self : Optional[int] ):
# Convert into blocks of 64 bytes
snake_case__ : str = [
self.preprocessed_data[x : x + 6_4]
for x in range(0 , len(self.preprocessed_data ) , 6_4 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
snake_case__ : Any = list(struct.unpack(">16L" , __A ) )
# extend the message schedule with 48 zero-initialized words (filled in below)
words += [0] * 4_8
snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ : Optional[int] = self.hashes
for index in range(0 , 6_4 ):
if index > 1_5:
# fill each remaining schedule word from earlier words (message schedule expansion)
snake_case__ : str = (
self.ror(words[index - 1_5] , 7 )
^ self.ror(words[index - 1_5] , 1_8 )
^ (words[index - 1_5] >> 3)
)
snake_case__ : Tuple = (
self.ror(words[index - 2] , 1_7 )
^ self.ror(words[index - 2] , 1_9 )
^ (words[index - 2] >> 1_0)
)
snake_case__ : List[Any] = (
words[index - 1_6] + sa + words[index - 7] + sa
) % 0x100000000
# Compression
snake_case__ : Optional[int] = self.ror(__A , 6 ) ^ self.ror(__A , 1_1 ) ^ self.ror(__A , 2_5 )
snake_case__ : str = (e & f) ^ ((~e & 0xFFFFFFFF) & g)
snake_case__ : str = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0x100000000
snake_case__ : Any = self.ror(__A , 2 ) ^ self.ror(__A , 1_3 ) ^ self.ror(__A , 2_2 )
snake_case__ : Optional[int] = (a & b) ^ (a & c) ^ (b & c)
snake_case__ : List[Any] = (sa + maj) % 0x100000000
snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ : Optional[int] = (
g,
f,
e,
((d + tempa) % 0x100000000),
c,
b,
a,
((tempa + tempa) % 0x100000000),
)
snake_case__ : Any = [a, b, c, d, e, f, g, h]
# Modify final values
snake_case__ : List[str] = [
((element + mutated_hash_values[index]) % 0x100000000)
for index, element in enumerate(self.hashes )
]
snake_case__ : Optional[int] = "".join([hex(__A )[2:].zfill(8 ) for value in self.hashes] )
def _lowercase ( self : List[Any] , __A : int , __A : int ):
return 0xFFFFFFFF & (value << (3_2 - rotations)) | (value >> rotations)
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Any ):
import hashlib
snake_case__ : int = bytes("Test String" , "utf-8" )
self.assertEqual(SHAaaa(__A ).hash , hashlib.sha256(__A ).hexdigest() )
def SCREAMING_SNAKE_CASE ( ):
import doctest
doctest.testmod()
snake_case__ : int = argparse.ArgumentParser()
parser.add_argument(
"-s" , "--string" , dest="input_string" , default="Hello World!! Welcome to Cryptography" , help="Hash the string" , )
parser.add_argument(
"-f" , "--file" , dest="input_file" , help="Hash contents of a file" )
snake_case__ : Dict = parser.parse_args()
snake_case__ : Optional[int] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , "rb" ) as f:
snake_case__ : Tuple = f.read()
else:
snake_case__ : List[str] = bytes(snake_case_ , "utf-8" )
print(SHAaaa(snake_case_ ).hash )
if __name__ == "__main__":
main()
| 25 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : int = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 1 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case__ : Optional[int] = bs[:]
snake_case__ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
snake_case__ : Dict = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
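# This is the GPT-2 byte-level trick: printable Latin-1 bytes map to themselves
# and the remaining 68 bytes are shifted to code points from 256 upward, so the
# space byte 0x20 becomes "Ġ" (chr(288)) and any byte string round-trips
# losslessly through characters the BPE can merge.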
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
snake_case__ : Dict = set()
snake_case__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : List[Any] = char
return pairs
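# The helper above (get_pairs upstream) returns the set of adjacent symbol
# pairs in a word, e.g. ("l", "o", "w") -> {("l", "o"), ("o", "w")}.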
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ):
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Any = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Union[str, Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Optional[Any] , __A : Optional[int] ):
if token in self.cache:
return self.cache[token]
snake_case__ : Union[str, Any] = tuple(__A )
snake_case__ : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__, snake_case__ : Dict = bigram
snake_case__ : str = []
snake_case__ : Union[str, Any] = 0
while i < len(__A ):
try:
snake_case__ : Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : str = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__A )
snake_case__ : List[Any] = " ".join(__A )
snake_case__ : Optional[int] = word
return word
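# Worked example (hypothetical merge table): with ranks
# {("l", "o"): 0, ("lo", "w"): 1}, the token "low" merges
# ("l", "o", "w") -> ("lo", "w") -> ("low",), so the method returns "low";
# with no applicable merges it would return "l o w" instead.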
def _lowercase ( self : Optional[Any] , __A : Optional[Any] ):
snake_case__ : List[str] = []
for token in re.findall(self.pat , __A ):
snake_case__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : Optional[Any] ):
return self.decoder.get(__A )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : Optional[Any] = "".join(__A )
snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : str = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : str = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ):
snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : Optional[int] = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
snake_case__ : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` needs to have the same length as other (sequential) inputs.
snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
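# Example of the padding rule above (hypothetical values): right-padding a
# global attention mask [0, 0, 1] to length 5 yields [0, 0, 1, -1, -1];
# left-padding yields [-1, -1, 0, 0, 1]. The pad value is -1 because 0
# already means "local attention" rather than "masked out".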
| 25 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Dict = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""funnel-transformer/small""": """https://huggingface.co/funnel-transformer/small/resolve/main/config.json""",
"""funnel-transformer/small-base""": """https://huggingface.co/funnel-transformer/small-base/resolve/main/config.json""",
"""funnel-transformer/medium""": """https://huggingface.co/funnel-transformer/medium/resolve/main/config.json""",
"""funnel-transformer/medium-base""": """https://huggingface.co/funnel-transformer/medium-base/resolve/main/config.json""",
"""funnel-transformer/intermediate""": (
"""https://huggingface.co/funnel-transformer/intermediate/resolve/main/config.json"""
),
"""funnel-transformer/intermediate-base""": (
"""https://huggingface.co/funnel-transformer/intermediate-base/resolve/main/config.json"""
),
"""funnel-transformer/large""": """https://huggingface.co/funnel-transformer/large/resolve/main/config.json""",
"""funnel-transformer/large-base""": """https://huggingface.co/funnel-transformer/large-base/resolve/main/config.json""",
"""funnel-transformer/xlarge""": """https://huggingface.co/funnel-transformer/xlarge/resolve/main/config.json""",
"""funnel-transformer/xlarge-base""": """https://huggingface.co/funnel-transformer/xlarge-base/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "funnel"
a_ = {
"hidden_size": "d_model",
"num_attention_heads": "n_head",
}
def __init__( self : List[Any] , __A : Optional[Any]=3_0_5_2_2 , __A : str=[4, 4, 4] , __A : Optional[Any]=None , __A : Optional[Any]=2 , __A : Tuple=7_6_8 , __A : int=1_2 , __A : Union[str, Any]=6_4 , __A : List[str]=3_0_7_2 , __A : List[str]="gelu_new" , __A : Optional[int]=0.1 , __A : Optional[Any]=0.1 , __A : Union[str, Any]=0.0 , __A : Optional[int]=0.1 , __A : Tuple=None , __A : List[Any]=1e-9 , __A : Dict="mean" , __A : Union[str, Any]="relative_shift" , __A : Union[str, Any]=True , __A : Union[str, Any]=True , __A : Any=True , **__A : Optional[Any] , ):
snake_case__ : Optional[int] = vocab_size
snake_case__ : Any = block_sizes
snake_case__ : List[Any] = [1] * len(__A ) if block_repeats is None else block_repeats
assert len(__A ) == len(
self.block_repeats ), "`block_sizes` and `block_repeats` should have the same length."
snake_case__ : Dict = num_decoder_layers
snake_case__ : List[Any] = d_model
snake_case__ : Union[str, Any] = n_head
snake_case__ : List[Any] = d_head
snake_case__ : int = d_inner
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : Any = hidden_dropout
snake_case__ : str = attention_dropout
snake_case__ : Dict = activation_dropout
snake_case__ : Dict = initializer_range
snake_case__ : Optional[Any] = initializer_std
snake_case__ : List[str] = layer_norm_eps
assert pooling_type in [
"mean",
"max",
], f'''Got {pooling_type} for `pooling_type` but only \'mean\' and \'max\' are supported.'''
snake_case__ : Optional[Any] = pooling_type
assert attention_type in [
"relative_shift",
"factorized",
], f'''Got {attention_type} for `attention_type` but only \'relative_shift\' and \'factorized\' are supported.'''
snake_case__ : int = attention_type
snake_case__ : int = separate_cls
snake_case__ : Union[str, Any] = truncate_seq
snake_case__ : Optional[int] = pool_q_only
super().__init__(**__A )
@property
def _lowercase ( self : List[Any] ):
return sum(self.block_sizes )
@num_hidden_layers.setter
def _lowercase ( self : int , __A : int ):
raise NotImplementedError(
"This model does not support the setting of `num_hidden_layers`. Please set `block_sizes`." )
@property
def _lowercase ( self : Optional[Any] ):
return len(self.block_sizes )
@num_blocks.setter
def _lowercase ( self : Optional[int] , __A : List[str] ):
raise NotImplementedError("This model does not support the setting of `num_blocks`. Please set `block_sizes`." )
| 25 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, __lowerCamelCase)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
| 25 | 1 |
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__lowerCamelCase : Optional[int] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """spiece.model"""}
__lowerCamelCase : Union[str, Any] = {
"""vocab_file""": {
"""albert-base-v1""": """https://huggingface.co/albert-base-v1/resolve/main/spiece.model""",
"""albert-large-v1""": """https://huggingface.co/albert-large-v1/resolve/main/spiece.model""",
"""albert-xlarge-v1""": """https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model""",
"""albert-xxlarge-v1""": """https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model""",
"""albert-base-v2""": """https://huggingface.co/albert-base-v2/resolve/main/spiece.model""",
"""albert-large-v2""": """https://huggingface.co/albert-large-v2/resolve/main/spiece.model""",
"""albert-xlarge-v2""": """https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model""",
"""albert-xxlarge-v2""": """https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model""",
}
}
__lowerCamelCase : Any = {
"""albert-base-v1""": 512,
"""albert-large-v1""": 512,
"""albert-xlarge-v1""": 512,
"""albert-xxlarge-v1""": 512,
"""albert-base-v2""": 512,
"""albert-large-v2""": 512,
"""albert-xlarge-v2""": 512,
"""albert-xxlarge-v2""": 512,
}
__lowerCamelCase : Tuple = """▁"""
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : str , __A : List[Any] , __A : Optional[Any]=True , __A : Optional[Any]=True , __A : int=False , __A : Any="[CLS]" , __A : Tuple="[SEP]" , __A : List[str]="<unk>" , __A : List[Any]="[SEP]" , __A : str="<pad>" , __A : Dict="[CLS]" , __A : Dict="[MASK]" , __A : Optional[Dict[str, Any]] = None , **__A : Tuple , ):
# The mask token behaves like a normal word, i.e. it includes the space before it and
# is included in the raw text; there should be a match in a non-normalized sentence.
snake_case__ : int = (
AddedToken(__A , lstrip=__A , rstrip=__A , normalized=__A )
if isinstance(__A , __A )
else mask_token
)
snake_case__ : Optional[int] = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__A , remove_space=__A , keep_accents=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , pad_token=__A , cls_token=__A , mask_token=__A , sp_model_kwargs=self.sp_model_kwargs , **__A , )
snake_case__ : Optional[Any] = do_lower_case
snake_case__ : List[Any] = remove_space
snake_case__ : Dict = keep_accents
snake_case__ : Tuple = vocab_file
snake_case__ : int = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__A )
@property
def _lowercase ( self : str ):
return len(self.sp_model )
def _lowercase ( self : Optional[Any] ):
snake_case__ : int = {self.convert_ids_to_tokens(i ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ):
snake_case__ : List[Any] = self.__dict__.copy()
snake_case__ : List[Any] = None
return state
def __setstate__( self : Optional[int] , __A : int ):
snake_case__ : Optional[int] = d
# for backward compatibility
if not hasattr(self , "sp_model_kwargs" ):
snake_case__ : List[Any] = {}
snake_case__ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _lowercase ( self : Optional[Any] , __A : List[Any] ):
if self.remove_space:
snake_case__ : Optional[Any] = " ".join(inputs.strip().split() )
else:
snake_case__ : str = inputs
snake_case__ : str = outputs.replace("``" , "\"" ).replace("''" , "\"" )
if not self.keep_accents:
snake_case__ : Union[str, Any] = unicodedata.normalize("NFKD" , __A )
snake_case__ : int = "".join([c for c in outputs if not unicodedata.combining(__A )] )
if self.do_lower_case:
snake_case__ : Tuple = outputs.lower()
return outputs
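# Illustrative sketch (hypothetical defaults: remove_space=True,
# keep_accents=False, do_lower_case=True): the input "  Héllo ``world''  "
# is collapsed to "Héllo \"world\"", NFKD-stripped of accents to
# "Hello \"world\"", then lowercased to 'hello "world"'.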
def _lowercase ( self : int , __A : str ):
snake_case__ : Optional[int] = self.preprocess_text(__A )
snake_case__ : Dict = self.sp_model.encode(__A , out_type=__A )
snake_case__ : str = []
for piece in pieces:
if len(__A ) > 1 and piece[-1] == str("," ) and piece[-2].isdigit():
snake_case__ : Optional[Any] = self.sp_model.EncodeAsPieces(piece[:-1].replace(__A , "" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
snake_case__ : Optional[int] = cur_pieces[1:]
else:
snake_case__ : Dict = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__A )
else:
new_pieces.append(__A )
return new_pieces
def _lowercase ( self : Tuple , __A : str ):
return self.sp_model.PieceToId(__A )
def _lowercase ( self : Tuple , __A : Optional[int] ):
return self.sp_model.IdToPiece(__A )
def _lowercase ( self : Tuple , __A : List[Any] ):
snake_case__ : str = []
snake_case__ : str = ""
snake_case__ : Union[str, Any] = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(__A ) + token
snake_case__ : int = True
snake_case__ : int = []
else:
current_sub_tokens.append(__A )
snake_case__ : Optional[Any] = False
out_string += self.sp_model.decode(__A )
return out_string.strip()
def _lowercase ( self : Optional[int] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Tuple = [self.sep_token_id]
snake_case__ : Any = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _lowercase ( self : Tuple , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is not None:
return [1] + ([0] * len(__A )) + [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1]
def _lowercase ( self : str , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _lowercase ( self : Union[str, Any] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__A ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __A )
elif not os.path.isfile(self.vocab_file ):
with open(__A , "wb" ) as fi:
snake_case__ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(__A )
return (out_vocab_file,)
| 25 |
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : Any = [0] * len(snake_case_ )
for i in range(1 , len(snake_case_ ) ):
# use last results for better performance - dynamic programming
snake_case__ : Union[str, Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
snake_case__ : str = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
snake_case__ : int = j
return prefix_result
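# Worked example: for the input "aabaaab" the table above is
# [0, 1, 0, 1, 2, 2, 3] -- e.g. the prefix ending at index 6 ("aabaaab")
# ends with "aab", which matches the 3-character prefix, hence the final 3.
# The helper below then reports that maximum value.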
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return max(prefix_function(snake_case_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : List[Any] = {"""configuration_vit_msn""": ["""VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ViTMSNConfig"""]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ViTMSNModel""",
"""ViTMSNForImageClassification""",
"""ViTMSNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_vit_msn import VIT_MSN_PRETRAINED_CONFIG_ARCHIVE_MAP, ViTMSNConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vit_msn import (
VIT_MSN_PRETRAINED_MODEL_ARCHIVE_LIST,
ViTMSNForImageClassification,
ViTMSNModel,
ViTMSNPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Union[str, Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
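# Note on the pattern above (upstream behavior, stated as an assumption):
# the _LazyModule stands in for this module, so that e.g.
# `from transformers.models.vit_msn import ViTMSNModel` only imports the
# torch-backed implementation on first attribute access.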
| 25 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
# use a global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def _lowercase ( self : int , __A : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ : Optional[int] = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
snake_case__ : Any = {"dtype": jnp.intaa}
else:
snake_case__ : Tuple = {"dtype": jnp.intaa}
elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case__ : str = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case__ : Optional[Any] = np.asarray(__A )
# use a global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
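# Note (upstream equivalents, stated as an assumption about the original
# source): the integer branch above picks jnp.int64 when jax's x64 mode is
# enabled and jnp.int32 otherwise, while floats default to jnp.float32; a
# dtype passed via jnp_array_kwargs overrides either default, since it is
# merged last into the kwargs dict.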
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
| 25 | 1 |
__lowerCamelCase : int = range(2, 20 + 1)
__lowerCamelCase : int = [10**k for k in range(ks[-1] + 1)]
__lowerCamelCase : dict[int, dict[int, list[list[int]]]] = {}
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Optional[int] , snake_case_ : int , snake_case_ : Optional[int] ):
snake_case__ : Optional[int] = sum(a_i[j] for j in range(snake_case_ , len(snake_case_ ) ) )
snake_case__ : Optional[Any] = sum(a_i[j] * base[j] for j in range(min(len(snake_case_ ) , snake_case_ ) ) )
snake_case__, snake_case__ : Tuple = 0, 0
snake_case__ : Tuple = n - i
snake_case__ : int = memo.get(snake_case_ )
if sub_memo is not None:
snake_case__ : Tuple = sub_memo.get(snake_case_ )
if jumps is not None and len(snake_case_ ) > 0:
# find and make the largest jump without going over
snake_case__ : Dict = -1
for _k in range(len(snake_case_ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
snake_case__ : Union[str, Any] = _k
break
if max_jump >= 0:
snake_case__, snake_case__, snake_case__ : Any = jumps[max_jump]
# since the difference between jumps is cached, add c
snake_case__ : str = diff + c
for j in range(min(snake_case_ , len(snake_case_ ) ) ):
snake_case__, snake_case__ : List[Any] = divmod(snake_case_ , 10 )
if new_c > 0:
add(snake_case_ , snake_case_ , snake_case_ )
else:
snake_case__ : List[Any] = []
else:
snake_case__ : str = {c: []}
snake_case__ : str = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
snake_case__, snake_case__ : Any = next_term(snake_case_ , k - 1 , i + dn , snake_case_ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
snake_case__, snake_case__ : List[Any] = compute(snake_case_ , snake_case_ , i + dn , snake_case_ )
diff += _diff
dn += terms_jumped
snake_case__ : Optional[Any] = sub_memo[c]
# keep jumps sorted by # of terms skipped
snake_case__ : Optional[int] = 0
while j < len(snake_case_ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(snake_case_ , (diff, dn, k) )
return (diff, dn)
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : str , snake_case_ : Optional[int] ):
if i >= n:
return 0, i
if k > len(snake_case_ ):
a_i.extend([0 for _ in range(k - len(snake_case_ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
snake_case__ : Tuple = i
snake_case__, snake_case__, snake_case__ : List[Any] = 0, 0, 0
for j in range(len(snake_case_ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
snake_case__ : str = ds_c + ds_b
diff += addend
snake_case__ : Any = 0
for j in range(snake_case_ ):
snake_case__ : Optional[int] = a_i[j] + addend
snake_case__, snake_case__ : str = divmod(snake_case_ , 10 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(snake_case_ , snake_case_ , snake_case_ )
return diff, i - start_i
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : List[str] , snake_case_ : List[str] ):
for j in range(snake_case_ , len(snake_case_ ) ):
snake_case__ : int = digits[j] + addend
if s >= 10:
snake_case__, snake_case__ : List[Any] = divmod(snake_case_ , 10 )
snake_case__ : Optional[int] = addend // 10 + quotient
else:
snake_case__ : Optional[Any] = s
snake_case__ : Optional[Any] = addend // 10
if addend == 0:
break
while addend > 0:
snake_case__, snake_case__ : List[str] = divmod(snake_case_ , 10 )
digits.append(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 10**15 ):
snake_case__ : List[Any] = [1]
snake_case__ : int = 1
snake_case__ : List[Any] = 0
while True:
snake_case__, snake_case__ : Optional[Any] = next_term(snake_case_ , 20 , i + dn , snake_case_ )
dn += terms_jumped
if dn == n - i:
break
snake_case__ : Union[str, Any] = 0
for j in range(len(snake_case_ ) ):
a_n += digits[j] * 10**j
return a_n
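# Sketch of the sequence being accelerated (a(1)=1, a(n+1)=a(n)+digitsum(a(n))):
# 1, 2, 4, 8, 16, 23, 28, 38, 49, 62, ... The jump cache built up in `memo`
# lets solution() reach the 10**15-th term without stepping through the
# sequence one term at a time.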
if __name__ == "__main__":
print(f"{solution() = }")
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 1 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda item : item[0] )[0]
snake_case__ : int = max(__A , key=lambda item : item[1] )[1]
return expected_height, expected_width
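# Example of the resize math above (hypothetical image): with
# shortest_edge=18 and an input of height 480 x width 640, the expected
# output is 18 x 24, since the shorter side is pinned to 18 and the longer
# side is scaled by the same 18/480 ratio.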
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Tuple = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def _lowercase ( self : Dict ):
snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation("gelu" )
snake_case__ : int = get_activation("gelu_10" )
snake_case__ : Optional[int] = torch_builtin(__A )
snake_case__ : Dict = geluaa(__A )
snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
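# Intuition for the assertions above: "gelu_10" behaves like gelu clipped to
# at most 10, so e.g. at x = 100 plain gelu returns roughly 100 while
# gelu_10 returns exactly 10.0; below the clip threshold the two curves
# coincide, which is what the masked allclose check verifies.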
def _lowercase ( self : str ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__A ):
get_activation("bogus" )
with self.assertRaises(__A ):
get_activation(__A )
def _lowercase ( self : List[str] ):
snake_case__ : List[str] = get_activation("gelu" )
snake_case__ : Any = 1
snake_case__ : Union[str, Any] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__A ):
snake_case__ : int = acta.a
| 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : List[Any] = {
"""configuration_swinv2""": ["""SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP""", """Swinv2Config"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""Swinv2ForImageClassification""",
"""Swinv2ForMaskedImageModeling""",
"""Swinv2Model""",
"""Swinv2PreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_swinv2 import SWINV2_PRETRAINED_CONFIG_ARCHIVE_MAP, Swinv2Config
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_swinv2 import (
SWINV2_PRETRAINED_MODEL_ARCHIVE_LIST,
Swinv2ForImageClassification,
Swinv2ForMaskedImageModeling,
Swinv2Model,
Swinv2PreTrainedModel,
)
else:
import sys
__lowerCamelCase : List[Any] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
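# Hypothetical invocation of this script (paths are placeholders, not from
# the source), matching the argparse flags defined above:
#   python <this_script>.py \
#       --checkpoint_path /path/to/unispeech_sat.pt \
#       --pytorch_dump_folder_path /path/to/output \
#       --dict_path /path/to/dict.ltr.txt
# Omitting --not_finetuned converts a fine-tuned (CTC) checkpoint.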
| 25 | 1 |
from __future__ import annotations
import requests
__lowerCamelCase : Optional[Any] = set(
"""approved_at_utc approved_by author_flair_background_color
author_flair_css_class author_flair_richtext author_flair_template_id author_fullname
author_premium can_mod_post category clicked content_categories created_utc downs
edited gilded gildings hidden hide_score is_created_from_ads_ui is_meta
is_original_content is_reddit_media_domain is_video link_flair_css_class
link_flair_richtext link_flair_text link_flair_text_color media_embed mod_reason_title
name permalink pwls quarantine saved score secure_media secure_media_embed selftext
subreddit subreddit_name_prefixed subreddit_type thumbnail title top_awarded_type
total_awards_received ups upvote_ratio url user_reports""".split()
)
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int = 1 , snake_case_ : str = "new" , snake_case_ : list | None = None ):
snake_case__ : Optional[int] = wanted_data or []
if invalid_search_terms := ", ".join(sorted(set(snake_case_ ) - valid_terms ) ):
snake_case__ : str = F'''Invalid search term: {invalid_search_terms}'''
raise ValueError(snake_case_ )
snake_case__ : Optional[int] = requests.get(
F'''https://reddit.com/r/{subreddit}/{age}.json?limit={limit}''' , headers={"User-agent": "A random string"} , )
if response.status_code == 429:
raise requests.HTTPError
snake_case__ : List[Any] = response.json()
if not wanted_data:
return {id_: data["data"]["children"][id_] for id_ in range(snake_case_ )}
snake_case__ : Optional[Any] = {}
for id_ in range(snake_case_ ):
snake_case__ : Tuple = {
item: data["data"]["children"][id_]["data"][item] for item in wanted_data
}
return data_dict
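# Shape of the result (hypothetical call): with wanted_data=["title", "url"]
# the function returns {0: {"title": ..., "url": ...}, 1: {...}, ...}, one
# entry per post up to `limit`; with no wanted_data it returns the raw post
# objects from the subreddit listing instead.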
if __name__ == "__main__":
# If you get Error 429, that means you are rate limited. Try again after some time.
print(get_subreddit_data("""learnpython""", wanted_data=["""title""", """url""", """selftext"""]))
| 25 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def prepare_mam_aaa_inputs_dict(
    config,
    input_ids,
    decoder_input_ids,
    attention_mask=None,
    decoder_attention_mask=None,
    head_mask=None,
    decoder_head_mask=None,
    cross_attn_head_mask=None,
):
    if attention_mask is None:
        attention_mask = input_ids.ne(config.pad_token_id)
    if decoder_attention_mask is None:
        decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
    if head_mask is None:
        head_mask = torch.ones(config.encoder_layers, config.encoder_attention_heads, device=torch_device)
    if decoder_head_mask is None:
        decoder_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
    if cross_attn_head_mask is None:
        cross_attn_head_mask = torch.ones(config.decoder_layers, config.decoder_attention_heads, device=torch_device)
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class MaMaaaModelTester:
"""simple docstring"""
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_labels=False,
        vocab_size=99,
        hidden_size=16,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=4,
        hidden_act="relu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        encoder_layerdrop=0.0,
        decoder_layerdrop=0.0,
        max_position_embeddings=20,
        eos_token_id=2,
        pad_token_id=1,
        bos_token_id=0,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_ids[:, -1] = self.eos_token_id  # Eos Token
        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)
        config = self.get_config()
        inputs_dict = prepare_mam_aaa_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def get_config(self):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def create_and_check_decoder_model_past_large_inputs(self, config, inputs_dict):
        model = MaMaaaModel(config=config).get_decoder().to(torch_device).eval()
        input_ids = inputs_dict["input_ids"]
        attention_mask = inputs_dict["attention_mask"]
        head_mask = inputs_dict["head_mask"]
        # first forward pass
        outputs = model(input_ids, attention_mask=attention_mask, head_mask=head_mask, use_cache=True)
        output, past_key_values = outputs.to_tuple()
        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_attn_mask = ids_tensor((self.batch_size, 3), 2)
        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([attention_mask, next_attn_mask], dim=-1)
        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask)["last_hidden_state"]
        output_from_past = model(next_tokens, attention_mask=next_attention_mask, past_key_values=past_key_values)[
            "last_hidden_state"
        ]
        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()
        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])
        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-2))
    def check_encoder_decoder_model_standalone(self, config, inputs_dict):
        model = MaMaaaModel(config=config).to(torch_device).eval()
        outputs = model(**inputs_dict)
        encoder_last_hidden_state = outputs.encoder_last_hidden_state
        last_hidden_state = outputs.last_hidden_state
        with tempfile.TemporaryDirectory() as tmpdirname:
            encoder = model.get_encoder()
            encoder.save_pretrained(tmpdirname)
            encoder = MaMaaaEncoder.from_pretrained(tmpdirname).to(torch_device)
        encoder_last_hidden_state_2 = encoder(
            inputs_dict["input_ids"], attention_mask=inputs_dict["attention_mask"]
        )[0]
        self.parent.assertTrue((encoder_last_hidden_state_2 - encoder_last_hidden_state).abs().max().item() < 1e-3)
        with tempfile.TemporaryDirectory() as tmpdirname:
            decoder = model.get_decoder()
            decoder.save_pretrained(tmpdirname)
            decoder = MaMaaaDecoder.from_pretrained(tmpdirname).to(torch_device)
        last_hidden_state_2 = decoder(
            input_ids=inputs_dict["decoder_input_ids"],
            attention_mask=inputs_dict["decoder_attention_mask"],
            encoder_hidden_states=encoder_last_hidden_state,
            encoder_attention_mask=inputs_dict["attention_mask"],
        )[0]
        self.parent.assertTrue((last_hidden_state_2 - last_hidden_state).abs().max().item() < 1e-3)
@require_torch
class MaMaaaModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
"""simple docstring"""
    all_model_classes = (
        (
            MaMaaaModel,
            MaMaaaForConditionalGeneration,
        )
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": MaMaaaForConditionalGeneration,
            "feature-extraction": MaMaaaModel,
            "summarization": MaMaaaForConditionalGeneration,
            "text2text-generation": MaMaaaForConditionalGeneration,
            "translation": MaMaaaForConditionalGeneration,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = True
    test_pruning = False
    test_missing_keys = False
    def is_pipeline_test_to_skip(
        self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name
    ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
    def setUp(self):
        self.model_tester = MaMaaaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=MaMaaaConfig)
    def test_config(self):
self.config_tester.run_common_tests()
    def test_save_load_strict(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs()
        for model_class in self.all_model_classes:
            model = model_class(config)
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname)
                model2, info = model_class.from_pretrained(tmpdirname, output_loading_info=True)
            self.assertEqual(info["missing_keys"], [])
    def test_decoder_model_past_with_large_inputs(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(*config_and_inputs)
    def test_encoder_decoder_model_standalone(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.check_encoder_decoder_model_standalone(*config_and_inputs)
    def test_inputs_embeds(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
            model = model_class(config)
            model.to(torch_device)
            model.eval()
            inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
            if not self.is_encoder_decoder:
                input_ids = inputs["input_ids"]
                del inputs["input_ids"]
            else:
                encoder_input_ids = inputs["input_ids"]
                decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
                del inputs["input_ids"]
                inputs.pop("decoder_input_ids", None)
            wte = model.get_input_embeddings()
            if not self.is_encoder_decoder:
                inputs["inputs_embeds"] = wte(input_ids)
            else:
                inputs["inputs_embeds"] = wte(encoder_input_ids)
                inputs["decoder_inputs_embeds"] = wte(decoder_input_ids)
            with torch.no_grad():
                model(**inputs)[0]
    def test_generate_fp16(self):
        config, input_dict = self.model_tester.prepare_config_and_inputs()
        input_ids = input_dict["input_ids"]
        attention_mask = input_ids.ne(1).to(torch_device)
        model = MaMaaaForConditionalGeneration(config).eval().to(torch_device)
        if torch_device == "cuda":
            model.half()
        model.generate(input_ids, attention_mask=attention_mask)
        model.generate(num_beams=4, do_sample=True, early_stopping=False, num_return_sequences=3)
def _long_tensor(tok_lst):
    return torch.tensor(tok_lst, dtype=torch.long, device=torch_device)
TOLERANCE = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class MaMaaaModelIntegrationTests(unittest.TestCase):
"""simple docstring"""
@cached_property
    def default_tokenizer(self):
        return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M")

    def test_inference_no_head(self):
        model = MaMaaaModel.from_pretrained("facebook/m2m100_418M").to(torch_device)
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, 1024))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-0.7780, -0.1676, 0.1038], [-6.7556, -1.3992, 0.0567], [-7.5383, -0.5920, -0.2779]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_inference_head(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        # change to intended input
        input_ids = _long_tensor([[128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38, 2]])
        decoder_input_ids = _long_tensor([[2, 128028, 98, 12, 30527, 2732, 159, 7755, 61904, 39144, 38]])
        inputs_dict = prepare_mam_aaa_inputs_dict(model.config, input_ids, decoder_input_ids)
        with torch.no_grad():
            output = model(**inputs_dict)[0]
        expected_shape = torch.Size((1, 11, model.config.vocab_size))
        self.assertEqual(output.shape, expected_shape)
        # change to expected output here
        expected_slice = torch.tensor(
            [[-1.0448, -1.0411, 3.7992], [-3.2191, -3.2386, -1.3451], [-3.6210, -3.5993, 0.4925]], device=torch_device
        )
        self.assertTrue(torch.allclose(output[:, :3, :3], expected_slice, atol=TOLERANCE))
    def test_seq_to_seq_generation(self):
        model = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M").to(torch_device)
        tokenizer = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M", src_lang="fr", tgt_lang="en")
        src_fr = [
            "L'affaire NSA souligne l'absence totale de débat sur le renseignement",
            "Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
            "Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
            " Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
            " l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
        ]
        # The below article tests that we don't add any hypotheses outside of the top n_beams
        dct = tokenizer(src_fr, padding=True, return_tensors="pt")
        hypotheses_batch = model.generate(
            input_ids=dct["input_ids"].to(torch_device),
            attention_mask=dct["attention_mask"].to(torch_device),
            num_beams=5,
            forced_bos_token_id=tokenizer.get_lang_id("en"),
        )
        expected_en = [
            "The NSA case highlights the total absence of intelligence debate",
            "I think there are two levels of response from the French government.",
            "When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
            " Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
            " communications in France.",
        ]
        generated = tokenizer.batch_decode(
            hypotheses_batch.tolist(), clean_up_tokenization_spaces=True, skip_special_tokens=True
        )
        assert generated == expected_en
| 25 | 1 |
import pytest
from datasets.splits import SplitDict, SplitInfo
from datasets.utils.py_utils import asdict
@pytest.mark.parametrize(
"split_dict" , [
SplitDict(),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 , dataset_name="my_dataset" )} ),
SplitDict({"train": SplitInfo(name="train" , num_bytes=1337 , num_examples=42 )} ),
SplitDict({"train": SplitInfo()} ),
] , )
def test_split_dict_to_yaml_list(split_dict: SplitDict):
    split_dict_yaml_list = split_dict._to_yaml_list()
    assert len(split_dict_yaml_list) == len(split_dict)
    reloaded = SplitDict._from_yaml_list(split_dict_yaml_list)
    for split_name, split_info in split_dict.items():
        # dataset_name field is deprecated, and is therefore not part of the YAML dump
        split_info.dataset_name = None
        # the split name of split_dict takes over the name of the split info object
        split_info.name = split_name
    assert split_dict == reloaded
@pytest.mark.parametrize(
    "split_info", [SplitInfo(), SplitInfo(dataset_name=None), SplitInfo(dataset_name="my_dataset")]
)
def test_split_dict_asdict_has_dataset_name(split_info):
    # For backward compatibility, we need asdict(split_dict) to return split info dictionaries with the "dataset_name"
    # field even if it's deprecated. This way old versions of `datasets` can still reload dataset_infos.json files
    split_dict_asdict = asdict(SplitDict({"train": split_info}))
    assert "dataset_name" in split_dict_asdict["train"]
    assert split_dict_asdict["train"]["dataset_name"] == split_info.dataset_name
| 25 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order):
    expected_row_ids_and_row_dicts = []
    for part_id in partition_order:
        partition = df.where(f"SPARK_PARTITION_ID() = {part_id}").collect()
        for row_idx, row in enumerate(partition):
            expected_row_ids_and_row_dicts.append((f"{part_id}_{row_idx}", row.asDict()))
    return expected_row_ids_and_row_dicts
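# Row ids produced by the Spark builder have the form "<partition_id>_<row_idx>"; the helper
# above reproduces that format so the tests below can compare against iterator output directly.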
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
    # that each partition can hold 2 rows.
    spark_builder._repartition_df_if_needed(max_shard_size=16)
    # Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
    assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def test_generate_iterable_examples():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(2)
    partition_order = [1, 0]
    generate_fn = _generate_iterable_examples(df, partition_order)  # Reverse the partitions.
    expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, partition_order)
    for i, (row_id, row_dict) in enumerate(generate_fn()):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(10).repartition(1)
    it = SparkExamplesIterable(df)
    assert it.n_shards == 1
    for i, (row_id, row_dict) in enumerate(it):
        assert row_id == f"0_{i}"
        assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shuffle_data_sources():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(30).repartition(3)
    # Mock the generator so that shuffle reverses the partition indices.
    with patch("numpy.random.Generator") as generator_mock:
        generator_mock.shuffle.side_effect = lambda x: x.reverse()
        expected_row_ids_and_row_dicts = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [2, 1, 0])
        shuffled_it = SparkExamplesIterable(df).shuffle_data_sources(generator_mock)
        assert shuffled_it.n_shards == 3
        for i, (row_id, row_dict) in enumerate(shuffled_it):
            expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts[i]
            assert row_id == expected_row_id
            assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_spark_examples_iterable_shard_data_sources():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(20).repartition(4)
    # Partitions 0 and 2
    shard_it_1 = SparkExamplesIterable(df).shard_data_sources(worker_id=0, num_workers=2)
    assert shard_it_1.n_shards == 2
    expected_row_ids_and_row_dicts_1 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [0, 2])
    for i, (row_id, row_dict) in enumerate(shard_it_1):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_1[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
    # Partitions 1 and 3
    shard_it_2 = SparkExamplesIterable(df).shard_data_sources(worker_id=1, num_workers=2)
    assert shard_it_2.n_shards == 2
    expected_row_ids_and_row_dicts_2 = _get_expected_row_ids_and_row_dicts_for_partition_order(df, [1, 3])
    for i, (row_id, row_dict) in enumerate(shard_it_2):
        expected_row_id, expected_row_dict = expected_row_ids_and_row_dicts_2[i]
        assert row_id == expected_row_id
        assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def test_repartition_df_if_needed_max_num_df_rows():
    spark = pyspark.sql.SparkSession.builder.master("local[*]").appName("pyspark").getOrCreate()
    df = spark.range(100).repartition(1)
    spark_builder = Spark(df)
    # Choose a small max_shard_size for maximum partitioning.
    spark_builder._repartition_df_if_needed(max_shard_size=1)
    # The new number of partitions should not be greater than the number of rows.
    assert spark_builder.df.rdd.getNumPartitions() == 100
| 25 | 1 |
import unittest
from transformers import (
MODEL_FOR_OBJECT_DETECTION_MAPPING,
AutoFeatureExtractor,
AutoModelForObjectDetection,
ObjectDetectionPipeline,
is_vision_available,
pipeline,
)
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_pytesseract,
require_tf,
require_timm,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        """simple docstring"""

        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_vision
@require_timm
@require_torch
class ObjectDetectionPipelineTests(unittest.TestCase):
"""simple docstring"""
    model_mapping = MODEL_FOR_OBJECT_DETECTION_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        object_detector = ObjectDetectionPipeline(model=model, image_processor=processor)
        return object_detector, ["./tests/fixtures/tests_samples/COCO/000000039769.png"]
    def run_pipeline_test(self, object_detector, examples):
        outputs = object_detector("./tests/fixtures/tests_samples/COCO/000000039769.png", threshold=0.0)
        self.assertGreater(len(outputs), 0)
        for detected_object in outputs:
            self.assertEqual(
                detected_object,
                {
                    "score": ANY(float),
                    "label": ANY(str),
                    "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                },
            )
        import datasets

        dataset = datasets.load_dataset("hf-internal-testing/fixtures_image_utils", "image", split="test")
        batch = [
            Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
            "http://images.cocodataset.org/val2017/000000039769.jpg",
            # RGBA
            dataset[0]["file"],
            # LA
            dataset[1]["file"],
            # L
            dataset[2]["file"],
        ]
        batch_outputs = object_detector(batch, threshold=0.0)
        self.assertEqual(len(batch), len(batch_outputs))
        for outputs in batch_outputs:
            self.assertGreater(len(outputs), 0)
            for detected_object in outputs:
                self.assertEqual(
                    detected_object,
                    {
                        "score": ANY(float),
                        "label": ANY(str),
                        "box": {"xmin": ANY(int), "ymin": ANY(int), "xmax": ANY(int), "ymax": ANY(int)},
                    },
                )
@require_tf
@unittest.skip("Object detection not implemented in TF" )
    def test_small_model_tf(self):
        pass
@require_torch
    def test_small_model_pt(self):
snake_case__ : Optional[int] = "hf-internal-testing/tiny-detr-mobilenetsv3"
snake_case__ : List[str] = AutoModelForObjectDetection.from_pretrained(__A )
snake_case__ : str = AutoFeatureExtractor.from_pretrained(__A )
snake_case__ : Tuple = ObjectDetectionPipeline(model=__A , feature_extractor=__A )
snake_case__ : List[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=0.0 )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
] , )
snake_case__ : List[Any] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] , threshold=0.0 , )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
[
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
],
[
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
{"score": 0.3_3_7_6, "label": "LABEL_0", "box": {"xmin": 1_5_9, "ymin": 1_2_0, "xmax": 4_8_0, "ymax": 3_5_9}},
],
] , )
@require_torch
@slow
    def test_large_model_pt(self):
snake_case__ : Optional[Any] = "facebook/detr-resnet-50"
snake_case__ : Tuple = AutoModelForObjectDetection.from_pretrained(__A )
snake_case__ : Union[str, Any] = AutoFeatureExtractor.from_pretrained(__A )
snake_case__ : Dict = ObjectDetectionPipeline(model=__A , feature_extractor=__A )
snake_case__ : List[Any] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
] , )
snake_case__ : List[str] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
],
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
],
] , )
@require_torch
@slow
    def test_integration_torch_object_detection(self):
snake_case__ : Dict = "facebook/detr-resnet-50"
snake_case__ : Optional[Any] = pipeline("object-detection" , model=__A )
snake_case__ : Optional[int] = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
] , )
snake_case__ : Optional[Any] = object_detector(
[
"http://images.cocodataset.org/val2017/000000039769.jpg",
"http://images.cocodataset.org/val2017/000000039769.jpg",
] )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
],
[
{"score": 0.9_9_8_2, "label": "remote", "box": {"xmin": 4_0, "ymin": 7_0, "xmax": 1_7_5, "ymax": 1_1_7}},
{"score": 0.9_9_6_0, "label": "remote", "box": {"xmin": 3_3_3, "ymin": 7_2, "xmax": 3_6_8, "ymax": 1_8_7}},
{"score": 0.9_9_5_5, "label": "couch", "box": {"xmin": 0, "ymin": 1, "xmax": 6_3_9, "ymax": 4_7_3}},
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
],
] , )
@require_torch
@slow
    def test_threshold(self):
snake_case__ : int = 0.9_9_8_5
snake_case__ : List[Any] = "facebook/detr-resnet-50"
snake_case__ : int = pipeline("object-detection" , model=__A )
snake_case__ : int = object_detector("http://images.cocodataset.org/val2017/000000039769.jpg" , threshold=__A )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"score": 0.9_9_8_8, "label": "cat", "box": {"xmin": 1_3, "ymin": 5_2, "xmax": 3_1_4, "ymax": 4_7_0}},
{"score": 0.9_9_8_7, "label": "cat", "box": {"xmin": 3_4_5, "ymin": 2_3, "xmax": 6_4_0, "ymax": 3_6_8}},
] , )
@require_torch
@require_pytesseract
@slow
    def test_layoutlm(self):
snake_case__ : Any = "Narsil/layoutlmv3-finetuned-funsd"
snake_case__ : List[Any] = 0.9_9_9_3
snake_case__ : Union[str, Any] = pipeline("object-detection" , model=__A , threshold=__A )
snake_case__ : str = object_detector(
"https://huggingface.co/spaces/impira/docquery/resolve/2359223c1837a7587402bda0f2643382a6eefeab/invoice.png" )
self.assertEqual(
nested_simplify(__A , decimals=4 ) , [
{"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}},
{"score": 0.9_9_9_3, "label": "I-ANSWER", "box": {"xmin": 2_9_4, "ymin": 2_5_4, "xmax": 3_4_3, "ymax": 2_6_4}},
] , )
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_xlnet": ["XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP", "XLNetConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet"] = ["XLNetTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_xlnet_fast"] = ["XLNetTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_xlnet"] = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_xlnet"] = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
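# `_LazyModule` defers the heavy framework imports: names listed in `_import_structure`
# are only materialized on first attribute access, which keeps `import transformers` fast.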
| 25 | 1 |
def solution(n: int = 4000000) -> int:
    even_fibs = []
    a, b = 0, 1
    while b <= n:
        if b % 2 == 0:
            even_fibs.append(b)
        a, b = b, a + b
    return sum(even_fibs)
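# Sanity check (worked example): the even Fibonacci numbers up to 100 are 2, 8 and 34,
# so solution(100) == 44.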
if __name__ == "__main__":
print(f"{solution() = }")
| 25 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
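# Minimal usage sketch (model id illustrative; weights download on first use):
#   from diffusers import KandinskyPipeline
#   pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")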
| 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_bartpho"] = ["BartphoTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_bartpho import BartphoTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 25 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def data_handling(data: dict) -> tuple:
    # Split the sklearn dataset bunch into (features, target)
    return (data["data"], data["target"])


def xgboost(features: np.ndarray, target: np.ndarray) -> XGBClassifier:
    classifier = XGBClassifier()
    classifier.fit(features, target)
    return classifier


def main() -> None:
    iris = load_iris()
    features, targets = data_handling(iris)
    x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
    names = iris["target_names"]
    # Create an XGBoost Classifier from the training data
    xgboost_classifier = xgboost(x_train, y_train)
    # Display the confusion matrix of the classifier with both training and test sets
    ConfusionMatrixDisplay.from_estimator(
        xgboost_classifier, x_test, y_test, display_labels=names, cmap="Blues", normalize="true",
    )
    plt.title("Normalized Confusion Matrix - IRIS Dataset")
    plt.show()
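# normalize="true" row-normalizes the confusion matrix, so each cell shows the fraction of
# true instances of a class predicted as each label (per-class recall on the diagonal).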
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}" + "\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}" + "\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)
def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text
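# e.g. normalize_text("Hello,\nWorld!") -> "hello world": punctuation is stripped,
# the text is lowercased, and newlines collapse to single spaces.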
def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
| 25 | 1 |
import logging
import os
from .state import PartialState
class MultiProcessAdapter(logging.LoggerAdapter):
    """simple docstring"""

    @staticmethod
    def _should_log(main_process_only):
        state = PartialState()
        return not main_process_only or (main_process_only and state.is_main_process)

    def log(self, level, msg, *args, **kwargs):
        if PartialState._shared_state == {}:
            raise RuntimeError(
                "You must initialize the accelerate state by calling either `PartialState()` or `Accelerator()` before using the logging utility."
            )
        main_process_only = kwargs.pop("main_process_only", True)
        in_order = kwargs.pop("in_order", False)
        if self.isEnabledFor(level):
            if self._should_log(main_process_only):
                msg, kwargs = self.process(msg, kwargs)
                self.logger.log(level, msg, *args, **kwargs)
            elif in_order:
                state = PartialState()
                for i in range(state.num_processes):
                    if i == state.process_index:
                        msg, kwargs = self.process(msg, kwargs)
                        self.logger.log(level, msg, *args, **kwargs)
                    state.wait_for_everyone()


def get_logger(name: str, log_level: str = None):
    if log_level is None:
        log_level = os.environ.get("ACCELERATE_LOG_LEVEL", None)
    logger = logging.getLogger(name)
    if log_level is not None:
        logger.setLevel(log_level.upper())
        logger.root.setLevel(log_level.upper())
    return MultiProcessAdapter(logger, {})
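# Typical usage (assuming an Accelerator or PartialState has been initialized first):
#   logger = get_logger(__name__)
#   logger.info("Logged on the main process only")
#   logger.info("Logged on every process", main_process_only=False)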
| 25 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=True)
class TextClassification(TaskTemplate):
"""simple docstring"""
    task: str = field(default="text-classification", metadata={"include_in_asdict_even_if_is_default": True})
    input_schema: ClassVar[Features] = Features({"text": Value("string")})
    label_schema: ClassVar[Features] = Features({"labels": ClassLabel})
    text_column: str = "text"
    label_column: str = "labels"
    def align_with_features(self, features):
        if self.label_column not in features:
            raise ValueError(f"Column {self.label_column} is not present in features.")
        if not isinstance(features[self.label_column], ClassLabel):
            raise ValueError(f"Column {self.label_column} is not a ClassLabel.")
        # Clone the template and swap the dataset's actual ClassLabel feature in for the generic one
        task_template = copy.deepcopy(self)
        label_schema = self.label_schema.copy()
        label_schema["labels"] = features[self.label_column]
        task_template.__dict__["label_schema"] = label_schema
        return task_template

    @property
    def column_mapping(self) -> Dict[str, str]:
        return {
            self.text_column: "text",
            self.label_column: "labels",
        }
| 25 | 1 |
from __future__ import annotations
def prime_factors(n: int) -> list[int]:
    i = 2
    factors = []
    while i * i <= n:
        if n % i:
            i += 1
        else:
            n //= i
            factors.append(i)
    if n > 1:
        factors.append(n)
    return factors
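# Worked example: prime_factors(360) -> [2, 2, 2, 3, 3, 5], since 360 = 2**3 * 3**2 * 5.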
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
INSTRUCTBLIP_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class InstructBlipVisionConfig(PretrainedConfig):
"""simple docstring"""
a_ = "instructblip_vision_model"
def __init__( self : List[Any] , __A : Dict=1_4_0_8 , __A : Tuple=6_1_4_4 , __A : str=3_9 , __A : int=1_6 , __A : str=2_2_4 , __A : Any=1_4 , __A : Dict="gelu" , __A : List[Any]=1e-6 , __A : Any=0.0 , __A : List[Any]=1e-1_0 , __A : Union[str, Any]=True , **__A : Tuple , ):
super().__init__(**__A )
snake_case__ : List[str] = hidden_size
snake_case__ : Optional[int] = intermediate_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : str = patch_size
snake_case__ : int = image_size
snake_case__ : int = initializer_range
snake_case__ : Optional[int] = attention_dropout
snake_case__ : str = layer_norm_eps
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Tuple = qkv_bias
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the vision config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["vision_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipQFormerConfig(PretrainedConfig):
"""simple docstring"""
a_ = "instructblip_qformer"
def __init__( self : Any , __A : Union[str, Any]=3_0_5_2_2 , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=1_2 , __A : Dict=3_0_7_2 , __A : List[str]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_2 , __A : Optional[int]=0.0_2 , __A : List[str]=1e-1_2 , __A : Any=0 , __A : Optional[Any]="absolute" , __A : str=2 , __A : Any=1_4_0_8 , **__A : List[str] , ):
super().__init__(pad_token_id=__A , **__A )
snake_case__ : Dict = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = hidden_act
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : int = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : Dict = cross_attention_frequency
snake_case__ : List[str] = encoder_hidden_size
    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)
        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)
        # get the qformer config dict if we are loading from InstructBlipConfig
        if config_dict.get("model_type") == "instructblip":
            config_dict = config_dict["qformer_config"]
        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )
        return cls.from_dict(config_dict, **kwargs)
class InstructBlipConfig(PretrainedConfig):
"""simple docstring"""
a_ = "instructblip"
a_ = True
def __init__( self : List[str] , __A : Optional[Any]=None , __A : Tuple=None , __A : Optional[int]=None , __A : Optional[Any]=3_2 , **__A : Optional[int] ):
super().__init__(**__A )
if vision_config is None:
snake_case__ : Any = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
snake_case__ : Optional[Any] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
snake_case__ : Optional[int] = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
snake_case__ : List[Any] = InstructBlipVisionConfig(**__A )
snake_case__ : Union[str, Any] = InstructBlipQFormerConfig(**__A )
snake_case__ : Dict = text_config["model_type"] if "model_type" in text_config else "opt"
snake_case__ : List[Any] = CONFIG_MAPPING[text_model_type](**__A )
snake_case__ : Union[str, Any] = self.text_config.tie_word_embeddings
snake_case__ : Tuple = self.text_config.is_encoder_decoder
snake_case__ : str = num_query_tokens
snake_case__ : Dict = self.vision_config.hidden_size
snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case__ : int = 1.0
snake_case__ : Optional[int] = 0.0_2
@classmethod
def _lowercase ( cls : List[str] , __A : InstructBlipVisionConfig , __A : InstructBlipQFormerConfig , __A : PretrainedConfig , **__A : int , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = copy.deepcopy(self.__dict__ )
snake_case__ : Optional[Any] = self.vision_config.to_dict()
snake_case__ : List[str] = self.qformer_config.to_dict()
snake_case__ : List[Any] = self.text_config.to_dict()
snake_case__ : List[Any] = self.__class__.model_type
return output
| 25 | 1 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def log_results(result: Dataset, args: Dict[str, str]):
    log_outputs = args.log_outputs
    dataset_id = "_".join(args.dataset.split("/") + [args.config, args.split])
    # load metric
    wer = load_metric("wer")
    cer = load_metric("cer")
    # compute metrics
    wer_result = wer.compute(references=result["target"], predictions=result["prediction"])
    cer_result = cer.compute(references=result["target"], predictions=result["prediction"])
    # print & log results
    result_str = f"WER: {wer_result}\nCER: {cer_result}"
    print(result_str)
    with open(f"{dataset_id}_eval_results.txt", "w") as f:
        f.write(result_str)
    # log all results in text file. Possibly interesting for analysis
    if log_outputs is not None:
        pred_file = f"log_{dataset_id}_predictions.txt"
        target_file = f"log_{dataset_id}_targets.txt"
        with open(pred_file, "w") as p, open(target_file, "w") as t:
            # mapping function to write output
            def write_to_file(batch, i):
                p.write(f"{i}\n")
                p.write(batch["prediction"] + "\n")
                t.write(f"{i}\n")
                t.write(batch["target"] + "\n")

            result.map(write_to_file, with_indices=True)


def normalize_text(text: str) -> str:
    chars_to_ignore_regex = '[,?.!\-\;\:"“%‘”�—’…–]'  # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
    text = re.sub(chars_to_ignore_regex, "", text.lower())
    # In addition, we can normalize the target text, e.g. removing new lines characters etc...
    # note that order is important here!
    token_sequences_to_ignore = ["\n\n", "\n", "   ", "  "]
    for t in token_sequences_to_ignore:
        text = " ".join(text.split(t))
    return text


def main(args):
    # load dataset
    dataset = load_dataset(args.dataset, args.config, split=args.split, use_auth_token=True)
    # for testing: only process the first two examples as a test
    # dataset = dataset.select(range(10))
    # load processor
    feature_extractor = AutoFeatureExtractor.from_pretrained(args.model_id)
    sampling_rate = feature_extractor.sampling_rate
    # resample audio
    dataset = dataset.cast_column("audio", Audio(sampling_rate=sampling_rate))
    # load eval pipeline
    if args.device is None:
        args.device = 0 if torch.cuda.is_available() else -1
    asr = pipeline("automatic-speech-recognition", model=args.model_id, device=args.device)

    # map function to decode audio
    def map_to_pred(batch):
        prediction = asr(
            batch["audio"]["array"], chunk_length_s=args.chunk_length_s, stride_length_s=args.stride_length_s
        )
        batch["prediction"] = prediction["text"]
        batch["target"] = normalize_text(batch["sentence"])
        return batch

    # run inference on all examples
    result = dataset.map(map_to_pred, remove_columns=dataset.column_names)
    # compute and log_results
    # do not change function below
    log_results(result, args)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
    args = parser.parse_args()
main(args)
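    # Example invocation (added for illustration; the model and dataset names below
    # are placeholders, not values from the original script):
    #   python eval.py --model_id my-org/wav2vec2-xls-r-finetuned \
    #     --dataset mozilla-foundation/common_voice_8_0 --config de --split test --log_outputs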
def gnome_sort(lst: list) -> list:
    # sort the list in place with gnome sort: walk forward while ordered, swap and
    # step back when an inversion is found
    if len(lst) <= 1:
        return lst
    i = 1
    while i < len(lst):
        if lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
            if i == 0:
                i = 1
    return lst
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(gnome_sort(unsorted))
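# Illustrative self-checks (added; not in the original file). Gnome sort is O(n^2)
# in the worst case but adaptive on nearly-sorted input.
assert gnome_sort([3, 1, 2]) == [1, 2, 3]
assert gnome_sort([]) == []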
def different_signs(num_a: int, num_b: int) -> bool:
    # True if num_a and num_b have opposite signs, checked via the XOR sign bit
    return num_a ^ num_b < 0
if __name__ == "__main__":
import doctest
doctest.testmod()
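# Illustrative checks (added; the original doctests were stripped during processing):
assert different_signs(1, -1) is True
assert different_signs(1, 1) is False
assert different_signs(-1, -1) is False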
from __future__ import annotations
import time
Path = list[tuple[int, int]]
grid = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
delta = [[-1, 0], [0, -1], [1, 0], [0, 1]]  # up, left, down, right
class Node:
    def __init__(self, pos_x: int, pos_y: int, goal_x: int, goal_y: int, parent: Node | None):
        self.pos_x = pos_x
        self.pos_y = pos_y
        self.pos = (pos_y, pos_x)
        self.goal_x = goal_x
        self.goal_y = goal_y
        self.parent = parent
class BreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.start = Node(start[1], start[0], goal[1], goal[0], None)
        self.target = Node(goal[1], goal[0], goal[1], goal[0], None)
        self.node_queue = [self.start]
        self.reached = False

    def search(self) -> Path | None:
        while self.node_queue:
            current_node = self.node_queue.pop(0)
            if current_node.pos == self.target.pos:
                self.reached = True
                return self.retrace_path(current_node)
            successors = self.get_successors(current_node)
            for node in successors:
                self.node_queue.append(node)
        if not self.reached:
            return [self.start.pos]
        return None

    def get_successors(self, parent: Node) -> list[Node]:
        successors = []
        for action in delta:
            pos_x = parent.pos_x + action[1]
            pos_y = parent.pos_y + action[0]
            if not (0 <= pos_x <= len(grid[0]) - 1 and 0 <= pos_y <= len(grid) - 1):
                continue
            if grid[pos_y][pos_x] != 0:
                continue
            successors.append(
                Node(pos_x, pos_y, self.target.pos_y, self.target.pos_x, parent)
            )
        return successors

    def retrace_path(self, node: Node | None) -> Path:
        current_node = node
        path = []
        while current_node is not None:
            path.append((current_node.pos_y, current_node.pos_x))
            current_node = current_node.parent
        path.reverse()
        return path
class BidirectionalBreadthFirstSearch:
    def __init__(self, start: tuple[int, int], goal: tuple[int, int]):
        self.fwd_bfs = BreadthFirstSearch(start, goal)
        self.bwd_bfs = BreadthFirstSearch(goal, start)
        self.reached = False

    def search(self) -> Path | None:
        while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
            current_fwd_node = self.fwd_bfs.node_queue.pop(0)
            current_bwd_node = self.bwd_bfs.node_queue.pop(0)
            if current_bwd_node.pos == current_fwd_node.pos:
                self.reached = True
                return self.retrace_bidirectional_path(
                    current_fwd_node, current_bwd_node
                )
            self.fwd_bfs.target = current_bwd_node
            self.bwd_bfs.target = current_fwd_node
            successors = {
                self.fwd_bfs: self.fwd_bfs.get_successors(current_fwd_node),
                self.bwd_bfs: self.bwd_bfs.get_successors(current_bwd_node),
            }
            for bfs in [self.fwd_bfs, self.bwd_bfs]:
                for node in successors[bfs]:
                    bfs.node_queue.append(node)
        if not self.reached:
            return [self.fwd_bfs.start.pos]
        return None

    def retrace_bidirectional_path(self, fwd_node: Node, bwd_node: Node) -> Path:
        fwd_path = self.fwd_bfs.retrace_path(fwd_node)
        bwd_path = self.bwd_bfs.retrace_path(bwd_node)
        bwd_path.pop()
        bwd_path.reverse()
        path = fwd_path + bwd_path
        return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
    init = (0, 0)
    goal = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
    start_bfs_time = time.time()
    bfs = BreadthFirstSearch(init, goal)
    path = bfs.search()
    bfs_time = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
    start_bd_bfs_time = time.time()
    bd_bfs = BidirectionalBreadthFirstSearch(init, goal)
    bd_path = bd_bfs.search()
    bd_bfs_time = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
from collections.abc import Generator
def fibonacci_generator() -> Generator[int, None, None]:
    a, b = 0, 1
    while True:
        a, b = b, a + b
        yield b


def solution(n: int = 1000) -> int:
    # index of the first Fibonacci term whose decimal representation has n digits
    answer = 1
    gen = fibonacci_generator()
    while len(str(next(gen))) < n:
        answer += 1
    return answer + 1
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
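# Illustrative check (added): the first Fibonacci term with three digits is
# F(12) = 144, so solution(3) should return 12.
# assert solution(3) == 12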
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class ConditionalDetrImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
        image_mean=[0.5, 0.5, 0.5],
        image_std=[0.5, 0.5, 0.5],
        do_rescale=True,
        rescale_factor=1 / 255,
        do_pad=True,
    ):
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"shortest_edge": 18, "longest_edge": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_pad = do_pad

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
            "image_mean": self.image_mean,
            "image_std": self.image_std,
            "do_rescale": self.do_rescale,
            "rescale_factor": self.rescale_factor,
            "do_pad": self.do_pad,
        }

    def get_expected_values(self, image_inputs, batched=False):
        if not batched:
            image = image_inputs[0]
            if isinstance(image, Image.Image):
                w, h = image.size
            else:
                h, w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["shortest_edge"] * h / w)
                expected_width = self.size["shortest_edge"]
            elif w > h:
                expected_height = self.size["shortest_edge"]
                expected_width = int(self.size["shortest_edge"] * w / h)
            else:
                expected_height = self.size["shortest_edge"]
                expected_width = self.size["shortest_edge"]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height, expected_width = self.get_expected_values([image])
                expected_values.append((expected_height, expected_width))
            expected_height = max(expected_values, key=lambda item: item[0])[0]
            expected_width = max(expected_values, key=lambda item: item[1])[1]
        return expected_height, expected_width


@require_torch
@require_vision
class ConditionalDetrImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ConditionalDetrImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ConditionalDetrImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "image_mean"))
        self.assertTrue(hasattr(image_processing, "image_std"))
        self.assertTrue(hasattr(image_processing, "do_normalize"))
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 18, "longest_edge": 1333})
        self.assertEqual(image_processor.do_pad, True)

        image_processor = self.image_processing_class.from_dict(
            self.image_processor_dict, size=42, max_size=84, pad_and_return_pixel_mask=False
        )
        self.assertEqual(image_processor.size, {"shortest_edge": 42, "longest_edge": 84})
        self.assertEqual(image_processor.do_pad, False)

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs)
        self.assertEqual(
            encoded_images.shape,
            (1, self.image_processor_tester.num_channels, expected_height, expected_width),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50")
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = ConditionalDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
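# To run this test module directly (illustrative; the path assumes the upstream
# transformers repository layout):
#   python -m pytest tests/models/conditional_detr/test_image_processing_conditional_detr.py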
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}
# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    # map every byte to a printable unicode character so BPE never sees raw whitespace
    # or control bytes
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    # return the set of adjacent symbol pairs in a word, represented as a tuple of symbols
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
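# Illustrative usage of the two helpers above (added, not part of the original file):
#   byte_encoder = bytes_to_unicode()       # maps 0-255 to printable unicode chars
#   get_pairs(("h", "e", "l", "l", "o"))    # -> {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}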
class LEDTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+")

    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)
        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )
        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")
        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file

    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]

    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )
        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names
        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)
            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])
                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))
        return encoded_inputs
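# A minimal usage sketch (added; assumes local `vocab.json` and `merges.txt` files
# compatible with this byte-level BPE tokenizer):
#   tokenizer = LEDTokenizer(vocab_file="vocab.json", merges_file="merges.txt")
#   enc = tokenizer("Hello world")
#   enc["global_attention_mask"] = [1] + [0] * (len(enc["input_ids"]) - 1)
#   padded = tokenizer.pad(enc, padding="max_length", max_length=16)
#   # the custom `_pad` above extends `global_attention_mask` with -1 (local attention)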
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
_CITATION = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
_DESCRIPTION = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metrics is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
_KWARGS_DESCRIPTION = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Mauve(datasets.Metric):
    def _info(self):
        return datasets.MetricInfo(
            description=_DESCRIPTION,
            citation=_CITATION,
            homepage="https://github.com/krishnap25/mauve",
            inputs_description=_KWARGS_DESCRIPTION,
            features=datasets.Features(
                {
                    "predictions": datasets.Value("string", id="sequence"),
                    "references": datasets.Value("string", id="sequence"),
                }
            ),
            codebase_urls=["https://github.com/krishnap25/mauve"],
            reference_urls=[
                "https://arxiv.org/abs/2102.01454",
                "https://github.com/krishnap25/mauve",
            ],
        )

    def _compute(
        self,
        predictions,
        references,
        p_features=None,
        q_features=None,
        p_tokens=None,
        q_tokens=None,
        num_buckets="auto",
        pca_max_data=-1,
        kmeans_explained_var=0.9,
        kmeans_num_redo=5,
        kmeans_max_iter=500,
        featurize_model_name="gpt2-large",
        device_id=-1,
        max_text_length=1024,
        divergence_curve_discretization_size=25,
        mauve_scaling_factor=5,
        verbose=True,
        seed=25,
    ):
        out = compute_mauve(
            p_text=predictions,
            q_text=references,
            p_features=p_features,
            q_features=q_features,
            p_tokens=p_tokens,
            q_tokens=q_tokens,
            num_buckets=num_buckets,
            pca_max_data=pca_max_data,
            kmeans_explained_var=kmeans_explained_var,
            kmeans_num_redo=kmeans_num_redo,
            kmeans_max_iter=kmeans_max_iter,
            featurize_model_name=featurize_model_name,
            device_id=device_id,
            max_text_length=max_text_length,
            divergence_curve_discretization_size=divergence_curve_discretization_size,
            mauve_scaling_factor=mauve_scaling_factor,
            verbose=verbose,
            seed=seed,
        )
        return out
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import MobileViTImageProcessor
class MobileViTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_center_crop=True,
        crop_size=None,
        do_flip_channel_order=True,
    ):
        size = size if size is not None else {"shortest_edge": 20}
        crop_size = crop_size if crop_size is not None else {"height": 18, "width": 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_flip_channel_order = do_flip_channel_order

    def prepare_image_processor_dict(self):
        return {
            "do_resize": self.do_resize,
            "size": self.size,
            "do_center_crop": self.do_center_crop,
            "crop_size": self.crop_size,
            "do_flip_channel_order": self.do_flip_channel_order,
        }


@require_torch
@require_vision
class MobileViTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = MobileViTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = MobileViTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, "do_resize"))
        self.assertTrue(hasattr(image_processing, "size"))
        self.assertTrue(hasattr(image_processing, "do_center_crop"))
        self.assertTrue(hasattr(image_processing, "center_crop"))
        self.assertTrue(hasattr(image_processing, "do_flip_channel_order"))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {"shortest_edge": 20})
        self.assertEqual(image_processor.crop_size, {"height": 18, "width": 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42, crop_size=84)
        self.assertEqual(image_processor.size, {"shortest_edge": 42})
        self.assertEqual(image_processor.crop_size, {"height": 84, "width": 84})

    def test_batch_feature(self):
        pass

    def test_call_pil(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PIL images
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False)
        for image in image_inputs:
            self.assertIsInstance(image, Image.Image)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_numpy(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random numpy tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, numpify=True)
        for image in image_inputs:
            self.assertIsInstance(image, np.ndarray)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

    def test_call_pytorch(self):
        # Initialize image_processing
        image_processing = self.image_processing_class(**self.image_processor_dict)
        # create random PyTorch tensors
        image_inputs = prepare_image_inputs(self.image_processor_tester, equal_resolution=False, torchify=True)
        for image in image_inputs:
            self.assertIsInstance(image, torch.Tensor)

        # Test not batched input
        encoded_images = image_processing(image_inputs[0], return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                1,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )

        # Test batched
        encoded_images = image_processing(image_inputs, return_tensors="pt").pixel_values
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                self.image_processor_tester.crop_size["height"],
                self.image_processor_tester.crop_size["width"],
            ),
        )
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__version__ = "2.13.1"
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
_arrow_dataset.concatenate_datasets = concatenate_datasets
_utils.DownloadConfig = DownloadConfig
_utils.DownloadManager = DownloadManager
_utils.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadConfig = DownloadConfig
_deprecated_download_manager.DownloadMode = DownloadMode
_deprecated_download_manager.DownloadManager = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
from typing import Dict
import numpy as np
import torch
from . import residue_constants as rc
from .tensor_utils import tensor_tree_map, tree_map
def make_atom14_masks(protein: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
    # Construct denser atom positions (14 dimensions instead of 37).
    restype_atom14_to_atom37_list = []
    restype_atom37_to_atom14_list = []
    restype_atom14_mask_list = []
    for rt in rc.restypes:
        atom_names = rc.restype_name_to_atom14_names[rc.restype_1to3[rt]]
        restype_atom14_to_atom37_list.append([(rc.atom_order[name] if name else 0) for name in atom_names])
        atom_name_to_idx14 = {name: i for i, name in enumerate(atom_names)}
        restype_atom37_to_atom14_list.append(
            [(atom_name_to_idx14[name] if name in atom_name_to_idx14 else 0) for name in rc.atom_types]
        )
        restype_atom14_mask_list.append([(1.0 if name else 0.0) for name in atom_names])
    # Add dummy mapping for restype 'UNK'
    restype_atom14_to_atom37_list.append([0] * 14)
    restype_atom37_to_atom14_list.append([0] * 37)
    restype_atom14_mask_list.append([0.0] * 14)
    restype_atom14_to_atom37 = torch.tensor(
        restype_atom14_to_atom37_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom37_to_atom14 = torch.tensor(
        restype_atom37_to_atom14_list, dtype=torch.int32, device=protein["aatype"].device,
    )
    restype_atom14_mask = torch.tensor(
        restype_atom14_mask_list, dtype=torch.float32, device=protein["aatype"].device,
    )
    protein_aatype = protein["aatype"].to(torch.long)
    # create the mapping for (residx, atom14) --> atom37, i.e. an array
    # with shape (num_res, 14) containing the atom37 indices for this protein
    residx_atom14_to_atom37 = restype_atom14_to_atom37[protein_aatype]
    residx_atom14_mask = restype_atom14_mask[protein_aatype]
    protein["atom14_atom_exists"] = residx_atom14_mask
    protein["residx_atom14_to_atom37"] = residx_atom14_to_atom37.long()
    # create the gather indices for mapping back
    residx_atom37_to_atom14 = restype_atom37_to_atom14[protein_aatype]
    protein["residx_atom37_to_atom14"] = residx_atom37_to_atom14.long()
    # create the corresponding mask
    restype_atom37_mask = torch.zeros([21, 37], dtype=torch.float32, device=protein["aatype"].device)
    for restype, restype_letter in enumerate(rc.restypes):
        restype_name = rc.restype_1to3[restype_letter]
        atom_names = rc.residue_atoms[restype_name]
        for atom_name in atom_names:
            atom_type = rc.atom_order[atom_name]
            restype_atom37_mask[restype, atom_type] = 1
    residx_atom37_mask = restype_atom37_mask[protein_aatype]
    protein["atom37_atom_exists"] = residx_atom37_mask
    return protein


def make_atom14_masks_np(batch: Dict[str, torch.Tensor]) -> Dict[str, np.ndarray]:
    batch = tree_map(lambda n: torch.tensor(n, device=batch["aatype"].device), batch, np.ndarray)
    out = tensor_tree_map(lambda t: np.array(t), make_atom14_masks(batch))
    return out
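# Illustrative use (added, not part of the original file): after `make_atom14_masks`,
# the gather indices map between the dense 14-atom and sparse 37-atom layouts, e.g.
#   protein = make_atom14_masks({"aatype": torch.zeros(8, dtype=torch.long)})
#   protein["residx_atom14_to_atom37"].shape  # -> torch.Size([8, 14])
#   protein["atom14_atom_exists"].shape       # -> torch.Size([8, 14])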
from __future__ import annotations
def prime_sieve(limit: int) -> list[int]:
    # sieve of Eratosthenes over odd numbers only
    is_prime = [True] * limit
    is_prime[0] = False
    is_prime[1] = False
    is_prime[2] = True
    for i in range(3, int(limit**0.5 + 1), 2):
        index = i * 2
        while index < limit:
            is_prime[index] = False
            index = index + i
    primes = [2]
    for i in range(3, limit, 2):
        if is_prime[i]:
            primes.append(i)
    return primes


def solution(ceiling: int = 1000000) -> int:
    # longest sum of consecutive primes below `ceiling` that is itself prime
    primes = prime_sieve(ceiling)
    length = 0
    largest = 0
    for i in range(len(primes)):
        for j in range(i + length, len(primes)):
            sol = sum(primes[i:j])
            if sol >= ceiling:
                break
            if sol in primes:
                length = j - i
                largest = sol
    return largest
if __name__ == "__main__":
print(f"{solution() = }")
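# Illustrative check (added): below one hundred the longest run of consecutive
# primes summing to a prime is 2 + 3 + 5 + 7 + 11 + 13 = 41, so solution(100) == 41.
# assert solution(100) == 41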
# Note: if you intend to run this script make sure you look under scripts/fsmt/
# to locate the appropriate script to do the work correctly. There is a set of scripts to:
# - download and prepare data and run the conversion script
# - perform eval to get the best hparam into the config
# - generate model_cards - useful if you have multiple models from the same paper
import argparse
import json
import os
import re
from collections import OrderedDict
from os.path import basename, dirname
import fairseq
import torch
from fairseq import hub_utils
from fairseq.data.dictionary import Dictionary
from transformers import FSMTConfig, FSMTForConditionalGeneration
from transformers.models.fsmt.tokenization_fsmt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
# based on the results of a search on a range of `num_beams`, `length_penalty` and `early_stopping`
# values against wmt19 test data to obtain the best BLEU scores, we will use the following defaults:
#
# * `num_beams`: 5 (higher scores better, but requires more memory/is slower, can be adjusted by users)
# * `early_stopping`: `False` consistently scored better
# * `length_penalty` varied, so will assign the best one depending on the model
best_score_hparams = {
# fairseq:
"""wmt19-ru-en""": {"""length_penalty""": 1.1},
"""wmt19-en-ru""": {"""length_penalty""": 1.15},
"""wmt19-en-de""": {"""length_penalty""": 1.0},
"""wmt19-de-en""": {"""length_penalty""": 1.1},
# allenai:
"""wmt16-en-de-dist-12-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-dist-6-1""": {"""length_penalty""": 0.6},
"""wmt16-en-de-12-1""": {"""length_penalty""": 0.8},
"""wmt19-de-en-6-6-base""": {"""length_penalty""": 0.6},
"""wmt19-de-en-6-6-big""": {"""length_penalty""": 0.6},
}
# this remaps the different models to their organization names
org_names = {}
for m in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
    org_names[m] = "facebook"
for m in [
"wmt16-en-de-dist-12-1",
"wmt16-en-de-dist-6-1",
"wmt16-en-de-12-1",
"wmt19-de-en-6-6-base",
"wmt19-de-en-6-6-big",
]:
    org_names[m] = "allenai"
def rewrite_dict_keys(d):
    # (1) remove word breaking symbol, (2) add word ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_fsmt_checkpoint_to_pytorch(fsmt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    assert os.path.exists(fsmt_checkpoint_path)
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = basename(fsmt_checkpoint_path)
    fsmt_folder_path = dirname(fsmt_checkpoint_path)

    cls = fairseq.model_parallel.models.transformer.ModelParallelTransformerModel
    models = cls.hub_models()
    kwargs = {"bpe": "fastbpe", "tokenizer": "moses"}
    data_name_or_path = "."
    # note: since the model dump is old, fairseq has upgraded its model some
    # time later, and it does a whole lot of rewrites and splits on the saved
    # weights, therefore we can't use torch.load() directly on the model file.
    # see: upgrade_state_dict(state_dict) in fairseq_model.py
    print(f"using checkpoint {checkpoint_file}")
    chkpt = hub_utils.from_pretrained(
        fsmt_folder_path, checkpoint_file, data_name_or_path, archive_map=models, **kwargs
    )

    args = vars(chkpt["args"]["model"])

    src_lang = args["source_lang"]
    tgt_lang = args["target_lang"]

    data_root = dirname(pytorch_dump_folder_path)
    model_dir = basename(pytorch_dump_folder_path)

    # dicts
    src_dict_file = os.path.join(fsmt_folder_path, f"dict.{src_lang}.txt")
    tgt_dict_file = os.path.join(fsmt_folder_path, f"dict.{tgt_lang}.txt")

    src_dict = Dictionary.load(src_dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-src.json")
    print(f"Generating {src_vocab_file} of {src_vocab_size} of {src_lang} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # detect whether this is a do_lower_case situation, which can be derived by checking whether we
    # have at least one uppercase letter in the source vocab
    do_lower_case = True
    for k in src_vocab.keys():
        if not k.islower():
            do_lower_case = False
            break

    tgt_dict = Dictionary.load(tgt_dict_file)
    tgt_vocab = rewrite_dict_keys(tgt_dict.indices)
    tgt_vocab_size = len(tgt_vocab)
    tgt_vocab_file = os.path.join(pytorch_dump_folder_path, "vocab-tgt.json")
    print(f"Generating {tgt_vocab_file} of {tgt_vocab_size} of {tgt_lang} records")
    with open(tgt_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tgt_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    for fn in ["bpecodes", "code"]:  # older fairseq called the merges file "code"
        fsmt_merges_file = os.path.join(fsmt_folder_path, fn)
        if os.path.exists(fsmt_merges_file):
            break
    with open(fsmt_merges_file, encoding="utf-8") as fin:
        merges = fin.read()
    merges = re.sub(r" \d+$", "", merges, 0, re.M)  # remove frequency number
    print(f"Generating {merges_file}")
    with open(merges_file, "w", encoding="utf-8") as fout:
        fout.write(merges)

    # model config
    fsmt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")

    # validate bpe/tokenizer config, as currently it's hardcoded to moses+fastbpe -
    # may have to modify the tokenizer if a different type is used by a future model
    assert args["bpe"] == "fastbpe", f"need to extend tokenizer to support bpe={args['bpe']}"
    assert args["tokenizer"] == "moses", f"need to extend tokenizer to support bpe={args['tokenizer']}"

    model_conf = {
        "architectures": ["FSMTForConditionalGeneration"],
        "model_type": "fsmt",
        "activation_dropout": args["activation_dropout"],
        "activation_function": "relu",
        "attention_dropout": args["attention_dropout"],
        "d_model": args["decoder_embed_dim"],
        "dropout": args["dropout"],
        "init_std": 0.02,
        "max_position_embeddings": args["max_source_positions"],
        "num_hidden_layers": args["encoder_layers"],
        "src_vocab_size": src_vocab_size,
        "tgt_vocab_size": tgt_vocab_size,
        "langs": [src_lang, tgt_lang],
        "encoder_attention_heads": args["encoder_attention_heads"],
        "encoder_ffn_dim": args["encoder_ffn_embed_dim"],
        "encoder_layerdrop": args["encoder_layerdrop"],
        "encoder_layers": args["encoder_layers"],
        "decoder_attention_heads": args["decoder_attention_heads"],
        "decoder_ffn_dim": args["decoder_ffn_embed_dim"],
        "decoder_layerdrop": args["decoder_layerdrop"],
        "decoder_layers": args["decoder_layers"],
        "bos_token_id": 0,
        "pad_token_id": 1,
        "eos_token_id": 2,
        "is_encoder_decoder": True,
        "scale_embedding": not args["no_scale_embedding"],
        "tie_word_embeddings": args["share_all_embeddings"],
    }

    # good hparam defaults to start with
    model_conf["num_beams"] = 5
    model_conf["early_stopping"] = False
    if model_dir in best_score_hparams and "length_penalty" in best_score_hparams[model_dir]:
        model_conf["length_penalty"] = best_score_hparams[model_dir]["length_penalty"]
    else:
        model_conf["length_penalty"] = 1.0

    print(f"Generating {fsmt_model_config_file}")
    with open(fsmt_model_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(model_conf, ensure_ascii=False, indent=json_indent))

    # tokenizer config
    fsmt_tokenizer_config_file = os.path.join(pytorch_dump_folder_path, TOKENIZER_CONFIG_FILE)

    tokenizer_conf = {
        "langs": [src_lang, tgt_lang],
        "model_max_length": 1024,
        "do_lower_case": do_lower_case,
    }

    print(f"Generating {fsmt_tokenizer_config_file}")
    with open(fsmt_tokenizer_config_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(tokenizer_conf, ensure_ascii=False, indent=json_indent))

    # model
    model = chkpt["models"][0]
    model_state_dict = model.state_dict()

    # rename keys to start with 'model.'
    model_state_dict = OrderedDict(("model." + k, v) for k, v in model_state_dict.items())

    # remove unneeded keys
    ignore_keys = [
        "model.model",
        "model.encoder.version",
        "model.decoder.version",
        "model.encoder_embed_tokens.weight",
        "model.decoder_embed_tokens.weight",
        "model.encoder.embed_positions._float_tensor",
        "model.decoder.embed_positions._float_tensor",
    ]
    for k in ignore_keys:
        model_state_dict.pop(k, None)

    config = FSMTConfig.from_pretrained(pytorch_dump_folder_path)
    model_new = FSMTForConditionalGeneration(config)

    # check that it loads ok
    model_new.load_state_dict(model_state_dict, strict=False)

    # save
    pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
    print(f"Generating {pytorch_weights_dump_path}")
    torch.save(model_state_dict, pytorch_weights_dump_path)

    print("Conversion is done!")
    print("\nLast step is to upload the files to s3")
    print(f"cd {data_root}")
    print(f"transformers-cli upload {model_dir}")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--fsmt_checkpoint_path""",
default=None,
type=str,
required=True,
help=(
"""Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,"""
""" bpecodes, etc."""
),
)
parser.add_argument(
"""--pytorch_dump_folder_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
    args = parser.parse_args()
convert_fsmt_checkpoint_to_pytorch(args.fsmt_checkpoint_path, args.pytorch_dump_folder_path)
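    # Example invocation (added for illustration; the paths are placeholders):
    #   python convert_fsmt_original_pytorch_checkpoint_to_pytorch.py \
    #     --fsmt_checkpoint_path data/wmt19.ru-en.ensemble/model4.pt \
    #     --pytorch_dump_folder_path data/wmt19-ru-en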
| 25 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
            snake_case__ : List[Any] = max(__A , key=lambda item : item[0] )[0]
            snake_case__ : int = max(__A , key=lambda item : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 1 |
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : bool = False ):
if not isinstance(snake_case_ , snake_case_ ):
snake_case__ : List[str] = F'''Expected string as input, found {type(snake_case_ )}'''
raise ValueError(snake_case_ )
if not isinstance(snake_case_ , snake_case_ ):
snake_case__ : Tuple = F'''Expected boolean as use_pascal parameter, found {type(snake_case_ )}'''
raise ValueError(snake_case_ )
snake_case__ : Tuple = input_str.split("_" )
snake_case__ : str = 0 if use_pascal else 1
snake_case__ : Optional[int] = words[start_index:]
snake_case__ : Optional[Any] = [word[0].upper() + word[1:] for word in words_to_capitalize]
snake_case__ : List[str] = "" if use_pascal else words[0]
return "".join([initial_word, *capitalized_words] )
if __name__ == "__main__":
from doctest import testmod
testmod()
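# Minimal self-contained sketch of the conversion above (hypothetical name;
# upstream this helper is typically called `snake_to_camel_case`): split on
# "_", then capitalize every word for PascalCase, or every word after the
# first for camelCase.
def _snake_to_camel_sketch(input_str: str, use_pascal: bool = False) -> str:
    words = input_str.split("_")
    start_index = 0 if use_pascal else 1
    capitalized = [word[0].upper() + word[1:] for word in words[start_index:]]
    initial = "" if use_pascal else words[0]
    return "".join([initial, *capitalized])

assert _snake_to_camel_sketch("some_random_string") == "someRandomString"
assert _snake_to_camel_sketch("some_random_string", use_pascal=True) == "SomeRandomString"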
| 25 |
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case__ : Optional[int] = bs[:]
snake_case__ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
snake_case__ : Dict = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
snake_case__ : Dict = set()
snake_case__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : List[Any] = char
return pairs
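# Self-contained sketch of the byte-pair-encoding merge loop used by the
# tokenizer below (upstream this is the `bpe()` method; the toy ranks here are
# hypothetical -- real merge ranks are read from merges.txt):
def _bpe_sketch(token, bpe_ranks):
    word = tuple(token)
    while len(word) > 1:
        pairs = set(zip(word, word[1:]))
        # merge the adjacent pair with the lowest (earliest-learned) rank
        bigram = min(pairs, key=lambda pair: bpe_ranks.get(pair, float("inf")))
        if bigram not in bpe_ranks:
            break
        first, second = bigram
        new_word, i = [], 0
        while i < len(word):
            if i < len(word) - 1 and (word[i], word[i + 1]) == bigram:
                new_word.append(first + second)
                i += 2
            else:
                new_word.append(word[i])
                i += 1
        word = tuple(new_word)
    return " ".join(word)

# e.g. with ranks {("l", "o"): 0, ("lo", "w"): 1}, "low" is merged
# ("l", "o", "w") -> ("lo", "w") -> ("low",) and returned as "low"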
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ):
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
        # Mask token behaves like a normal word, i.e. includes the space before it
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Any = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Union[str, Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Optional[Any] , __A : Optional[int] ):
if token in self.cache:
return self.cache[token]
snake_case__ : Union[str, Any] = tuple(__A )
snake_case__ : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__, snake_case__ : Dict = bigram
snake_case__ : str = []
snake_case__ : Union[str, Any] = 0
while i < len(__A ):
try:
snake_case__ : Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : str = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__A )
snake_case__ : List[Any] = " ".join(__A )
snake_case__ : Optional[int] = word
return word
def _lowercase ( self : Optional[Any] , __A : Optional[Any] ):
snake_case__ : List[str] = []
for token in re.findall(self.pat , __A ):
snake_case__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : Optional[Any] ):
return self.decoder.get(__A )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : Optional[Any] = "".join(__A )
snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : str = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : str = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ):
snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : Optional[int] = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
snake_case__ : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
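# Padding semantics implemented by the `_pad` override above: after the base
# class pads the usual inputs, `global_attention_mask` is extended with -1,
# e.g. with padding_side="right" and a difference of 2:
#   [0, 1, 0] -> [0, 1, 0, -1, -1]
# where 0 marks local attention, 1 global attention, and -1 padded positions.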
| 25 | 1 |
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError("multiplicative_persistence() only accepts integral values" )
if num < 0:
raise ValueError("multiplicative_persistence() does not accept negative values" )
snake_case__ : Optional[int] = 0
snake_case__ : List[str] = str(snake_case_ )
while len(snake_case_ ) != 1:
snake_case__ : Optional[Any] = [int(snake_case_ ) for i in num_string]
snake_case__ : int = 1
for i in range(0 , len(snake_case_ ) ):
total *= numbers[i]
snake_case__ : Tuple = str(snake_case_ )
steps += 1
return steps
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
if not isinstance(snake_case_ , snake_case_ ):
raise ValueError("additive_persistence() only accepts integral values" )
if num < 0:
raise ValueError("additive_persistence() does not accept negative values" )
snake_case__ : Any = 0
snake_case__ : List[str] = str(snake_case_ )
while len(snake_case_ ) != 1:
snake_case__ : int = [int(snake_case_ ) for i in num_string]
snake_case__ : Any = 0
for i in range(0 , len(snake_case_ ) ):
total += numbers[i]
snake_case__ : Tuple = str(snake_case_ )
steps += 1
return steps
if __name__ == "__main__":
import doctest
doctest.testmod()
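# Worked examples, using the upstream names from the error messages above:
#   multiplicative_persistence(39)  -> 3   (39 -> 27 -> 14 -> 4)
#   additive_persistence(199)       -> 3   (199 -> 19 -> 10 -> 1)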
| 25 |
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
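# A typical invocation that exercises the hooks above (assuming the shared
# diffusers helpers register the option as shown):
#   pytest tests/ --make-reports=run_name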
| 25 | 1 |
from collections import Counter
from timeit import timeit
def SCREAMING_SNAKE_CASE ( snake_case_ : str = "" , ):
return sum(c % 2 for c in Counter(input_str.replace(" " , "" ).lower() ).values() ) < 2
def SCREAMING_SNAKE_CASE ( snake_case_ : str = "" ):
if len(snake_case_ ) == 0:
return True
snake_case__ : Tuple = input_str.replace(" " , "" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
snake_case__ : dict[str, int] = {}
for character in lower_case_input_str:
snake_case__ : Tuple = character_freq_dict.get(snake_case_ , 0 ) + 1
snake_case__ : str = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
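# Both functions above implement the same parity rule: a string can be
# rearranged into a palindrome iff at most one character has an odd count, e.g.
#   "Momo"   -> counts {m: 2, o: 2}          -> 0 odd counts -> True
#   "Mother" -> every character appears once -> 6 odd counts -> False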
def SCREAMING_SNAKE_CASE ( snake_case_ : str = "" ):
print("\nFor string = " , snake_case_ , ":" )
print(
"> can_string_be_rearranged_as_palindrome_counter()" , "\tans =" , can_string_be_rearranged_as_palindrome_counter(snake_case_ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome_counter(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
print(
"> can_string_be_rearranged_as_palindrome()" , "\tans =" , can_string_be_rearranged_as_palindrome(snake_case_ ) , "\ttime =" , timeit(
"z.can_string_be_rearranged_as_palindrome(z.check_str)" , setup="import __main__ as z" , ) , "seconds" , )
if __name__ == "__main__":
__lowerCamelCase : Union[str, Any] = input(
"""Enter string to determine if it can be rearranged as a palindrome or not: """
).strip()
benchmark(check_str)
__lowerCamelCase : Any = can_string_be_rearranged_as_palindrome_counter(check_str)
print(f"{check_str} can {'' if status else 'not '}be rearranged as a palindrome")
| 25 |
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : Any = [0] * len(snake_case_ )
for i in range(1 , len(snake_case_ ) ):
        # reuse the previous result (dynamic programming): follow failure links
snake_case__ : Union[str, Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
snake_case__ : str = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
snake_case__ : int = j
return prefix_result
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return max(prefix_function(snake_case_ ) )
if __name__ == "__main__":
import doctest
doctest.testmod()
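# Worked example for the prefix (KMP failure) function above, using the
# upstream names prefix_function / longest_prefix:
#   prefix_function("aabcdaabc") -> [0, 1, 0, 0, 0, 1, 2, 3, 4]
#   longest_prefix("aabcdaabc")  -> 4   (the border "aabc")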
| 25 | 1 |
import heapq
def SCREAMING_SNAKE_CASE ( snake_case_ : dict ):
snake_case__ : list[list] = []
    # for each node, push its rank together with (node, adjacency list) onto
    # the queue; heapq implements a min-priority queue, so -1 * len(v) is used
    # to simulate a max-priority queue ordered by vertex degree
for key, value in graph.items():
# O(log(n))
heapq.heappush(snake_case_ , [-1 * len(snake_case_ ), (key, value)] )
# chosen_vertices = set of chosen vertices
snake_case__ : Tuple = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
snake_case__ : List[Any] = heapq.heappop(snake_case_ )[1][0]
chosen_vertices.add(snake_case_ )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
snake_case__ : Tuple = elem[1][1].index(snake_case_ )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(snake_case_ )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : Dict = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(f"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}")
| 25 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
        # using a global variable since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def _lowercase ( self : int , __A : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ : Optional[int] = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
snake_case__ : Any = {"dtype": jnp.intaa}
else:
snake_case__ : Tuple = {"dtype": jnp.intaa}
elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case__ : str = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case__ : Optional[Any] = np.asarray(__A )
        # using a global variable since `jaxlib.xla_extension.Device` is not
        # serializable with either `pickle` or `dill`
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
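# Sketch of how this formatter is reached through the public `datasets` API
# (exact behavior depends on the installed `datasets`/`jax` versions):
#
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]})
#   ds = ds.with_format("jax")   # rows/columns are produced by the formatter above
#   ds[0]["x"]                   # -> jnp.ndarray placed on the requested device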
| 25 | 1 |
from __future__ import annotations
import inspect
import unittest
from transformers import ViTConfig
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import cached_property, is_tf_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFViTForImageClassification, TFViTModel
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : int , __A : Optional[int]=1_3 , __A : Dict=3_0 , __A : str=2 , __A : List[str]=3 , __A : Union[str, Any]=True , __A : List[Any]=True , __A : List[Any]=3_2 , __A : str=2 , __A : Any=4 , __A : Dict=3_7 , __A : Optional[int]="gelu" , __A : List[str]=0.1 , __A : List[str]=0.1 , __A : str=1_0 , __A : Any=0.0_2 , __A : str=3 , __A : Any=None , ):
snake_case__ : Optional[int] = parent
snake_case__ : str = batch_size
snake_case__ : Optional[Any] = image_size
snake_case__ : Tuple = patch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : List[Any] = is_training
snake_case__ : Optional[int] = use_labels
snake_case__ : Union[str, Any] = hidden_size
snake_case__ : Any = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : Any = hidden_act
snake_case__ : str = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : Optional[Any] = type_sequence_label_size
snake_case__ : Optional[Any] = initializer_range
snake_case__ : str = scope
# in ViT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
snake_case__ : Any = (image_size // patch_size) ** 2
snake_case__ : Optional[Any] = num_patches + 1
def _lowercase ( self : Optional[Any] ):
snake_case__ : Tuple = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Tuple = None
if self.use_labels:
snake_case__ : str = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Tuple = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Dict ):
return ViTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__A , initializer_range=self.initializer_range , )
def _lowercase ( self : Optional[Any] , __A : Union[str, Any] , __A : Any , __A : Optional[int] ):
snake_case__ : List[Any] = TFViTModel(config=__A )
snake_case__ : Union[str, Any] = model(__A , training=__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
# Test with an image with different size than the one specified in config.
snake_case__ : Optional[Any] = self.image_size // 2
snake_case__ : Dict = pixel_values[:, :, :image_size, :image_size]
snake_case__ : Any = model(__A , interpolate_pos_encoding=__A , training=__A )
snake_case__ : Any = (image_size // self.patch_size) ** 2 + 1
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, seq_length, self.hidden_size) )
def _lowercase ( self : Optional[int] , __A : int , __A : List[Any] , __A : List[Any] ):
snake_case__ : Any = self.type_sequence_label_size
snake_case__ : Optional[Any] = TFViTForImageClassification(__A )
snake_case__ : List[str] = model(__A , labels=__A , training=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# Test with an image with different size than the one specified in config.
snake_case__ : Tuple = self.image_size // 2
snake_case__ : str = pixel_values[:, :, :image_size, :image_size]
snake_case__ : int = model(__A , interpolate_pos_encoding=__A , training=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
snake_case__ : Union[str, Any] = 1
snake_case__ : Union[str, Any] = TFViTForImageClassification(__A )
snake_case__ : Tuple = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
snake_case__ : str = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = self.prepare_config_and_inputs()
snake_case__, snake_case__, snake_case__ : Any = config_and_inputs
snake_case__ : int = {"pixel_values": pixel_values}
return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (TFViTModel, TFViTForImageClassification) if is_tf_available() else ()
a_ = (
{"feature-extraction": TFViTModel, "image-classification": TFViTForImageClassification}
if is_tf_available()
else {}
)
a_ = False
a_ = False
a_ = False
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Any = TFViTModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 )
def _lowercase ( self : Any ):
self.config_tester.run_common_tests()
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _lowercase ( self : Optional[int] ):
pass
@unittest.skip(reason="ViT does not use inputs_embeds" )
def _lowercase ( self : Optional[int] ):
pass
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (tf.keras.layers.Layer) )
snake_case__ : Optional[int] = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , tf.keras.layers.Layer ) )
def _lowercase ( self : Dict ):
snake_case__, snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[Any] = model_class(__A )
snake_case__ : Union[str, Any] = inspect.signature(model.call )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Dict = [*signature.parameters.keys()]
snake_case__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _lowercase ( self : Dict ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
@slow
def _lowercase ( self : str ):
snake_case__ : List[str] = TFViTModel.from_pretrained("google/vit-base-patch16-224" )
self.assertIsNotNone(__A )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_tf
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Union[str, Any] ):
return ViTImageProcessor.from_pretrained("google/vit-base-patch16-224" ) if is_vision_available() else None
@slow
def _lowercase ( self : int ):
snake_case__ : Tuple = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224" )
snake_case__ : List[str] = self.default_image_processor
snake_case__ : Optional[int] = prepare_img()
snake_case__ : int = image_processor(images=__A , return_tensors="tf" )
# forward pass
snake_case__ : Dict = model(**__A )
# verify the logits
snake_case__ : str = tf.TensorShape((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __A )
snake_case__ : int = tf.constant([-0.2_7_4_4, 0.8_2_1_5, -0.0_8_3_6] )
tf.debugging.assert_near(outputs.logits[0, :3] , __A , atol=1e-4 )
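# Minimal inference sketch mirroring the integration test above (assumes
# network access to the google/vit-base-patch16-224 checkpoint):
#
#   processor = ViTImageProcessor.from_pretrained("google/vit-base-patch16-224")
#   model = TFViTForImageClassification.from_pretrained("google/vit-base-patch16-224")
#   inputs = processor(images=prepare_img(), return_tensors="tf")
#   logits = model(**inputs).logits                # shape (1, 1000)
#   predicted_class = int(tf.math.argmax(logits, axis=-1)[0])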
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
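# The module above defers heavy imports: `_import_structure` only lists names,
# and `_LazyModule` resolves the owning submodule the first time an attribute
# is accessed. A simplified sketch of that lookup (not the real class):
def _lazy_getattr_sketch(name, import_structure, package):
    import importlib

    for module_name, exported_names in import_structure.items():
        if name in exported_names:
            module = importlib.import_module("." + module_name, package)
            return getattr(module, name)
    raise AttributeError(name)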
| 25 | 1 |
import importlib
import json
import os
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
import transformers.models.auto
from transformers.models.auto.configuration_auto import CONFIG_MAPPING, AutoConfig
from transformers.models.bert.configuration_bert import BertConfig
from transformers.models.roberta.configuration_roberta import RobertaConfig
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / """utils"""))
from test_module.custom_configuration import CustomConfig # noqa E402
__lowerCamelCase : Union[str, Any] = get_tests_dir("""fixtures/dummy-config.json""")
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[int] ):
snake_case__ : str = 0
def _lowercase ( self : int ):
self.assertIsNotNone(transformers.models.auto.__spec__ )
self.assertIsNotNone(importlib.util.find_spec("transformers.models.auto" ) )
def _lowercase ( self : List[Any] ):
snake_case__ : str = AutoConfig.from_pretrained("bert-base-uncased" )
self.assertIsInstance(__A , __A )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Optional[int] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def _lowercase ( self : List[Any] ):
snake_case__ : Any = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
def _lowercase ( self : List[Any] ):
snake_case__ : Tuple = AutoConfig.for_model("roberta" )
self.assertIsInstance(__A , __A )
def _lowercase ( self : Tuple ):
with tempfile.TemporaryDirectory() as tmp_dir:
# This model name contains bert and roberta, but roberta ends up being picked.
snake_case__ : Any = os.path.join(__A , "fake-roberta" )
os.makedirs(__A , exist_ok=__A )
with open(os.path.join(__A , "config.json" ) , "w" ) as f:
f.write(json.dumps({} ) )
snake_case__ : List[Any] = AutoConfig.from_pretrained(__A )
self.assertEqual(type(__A ) , __A )
def _lowercase ( self : List[str] ):
try:
AutoConfig.register("custom" , __A )
# Wrong model type will raise an error
with self.assertRaises(__A ):
AutoConfig.register("model" , __A )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__A ):
AutoConfig.register("bert" , __A )
# Now that the config is registered, it can be used as any other config with the auto-API
snake_case__ : str = CustomConfig()
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
snake_case__ : List[str] = AutoConfig.from_pretrained(__A )
self.assertIsInstance(__A , __A )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
def _lowercase ( self : Union[str, Any] ):
with self.assertRaisesRegex(
__A , "bert-base is not a local folder and is not a valid model identifier" ):
snake_case__ : Any = AutoConfig.from_pretrained("bert-base" )
def _lowercase ( self : int ):
with self.assertRaisesRegex(
__A , R"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
snake_case__ : Optional[Any] = AutoConfig.from_pretrained(__A , revision="aaaaaa" )
def _lowercase ( self : Any ):
with self.assertRaisesRegex(
__A , "hf-internal-testing/no-config-test-repo does not appear to have a file named config.json." , ):
snake_case__ : Any = AutoConfig.from_pretrained("hf-internal-testing/no-config-test-repo" )
def _lowercase ( self : List[str] ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__A ):
snake_case__ : Dict = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__A ):
snake_case__ : Union[str, Any] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
snake_case__ : Optional[int] = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
# Test config can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
config.save_pretrained(__A )
snake_case__ : Union[str, Any] = AutoConfig.from_pretrained(__A , trust_remote_code=__A )
self.assertEqual(reloaded_config.__class__.__name__ , "NewModelConfig" )
def _lowercase ( self : Any ):
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "new-model"
try:
AutoConfig.register("new-model" , __A )
# If remote code is not set, the default is to use local
snake_case__ : Any = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote code is disabled, we load the local one.
snake_case__ : Dict = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , "NewModelConfigLocal" )
# If remote is enabled, we load from the Hub
snake_case__ : Dict = AutoConfig.from_pretrained("hf-internal-testing/test_dynamic_model" , trust_remote_code=__A )
self.assertEqual(config.__class__.__name__ , "NewModelConfig" )
finally:
if "new-model" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["new-model"]
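# The registration flow exercised above, in short (sketch; assumes a config
# class exposing a unique `model_type`):
#
#   class MyConfig(PretrainedConfig):
#       model_type = "my-model"
#
#   AutoConfig.register("my-model", MyConfig)
#   config = AutoConfig.from_pretrained(path_to_saved_my_config)  # -> MyConfig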
| 25 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Tuple = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def _lowercase ( self : Dict ):
snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation("gelu" )
snake_case__ : int = get_activation("gelu_10" )
snake_case__ : Optional[int] = torch_builtin(__A )
snake_case__ : Dict = geluaa(__A )
snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _lowercase ( self : str ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__A ):
get_activation("bogus" )
with self.assertRaises(__A ):
get_activation(__A )
def _lowercase ( self : List[str] ):
snake_case__ : List[str] = get_activation("gelu" )
snake_case__ : Any = 1
snake_case__ : Union[str, Any] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__A ):
snake_case__ : int = acta.a
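# Reference formulation of the exact "gelu" checked above: GELU(x) = x * Phi(x)
# with Phi the standard normal CDF; "gelu_10" additionally clips the output
# (the test verifies the 10.0 upper bound). A pure-Python sketch:
import math

def _gelu_sketch(x: float) -> float:
    return 0.5 * x * (1.0 + math.erf(x / math.sqrt(2.0)))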
| 25 | 1 |
import copy
import inspect
import unittest
import numpy as np
from huggingface_hub import hf_hub_download
from transformers import TimesformerConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING,
TimesformerForVideoClassification,
TimesformerModel,
)
from transformers.models.timesformer.modeling_timesformer import TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from transformers import VideoMAEImageProcessor
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : Tuple , __A : Optional[int]=1_3 , __A : Tuple=1_0 , __A : List[Any]=3 , __A : Tuple=2 , __A : List[str]=2 , __A : Tuple=True , __A : Optional[Any]=True , __A : Optional[Any]=3_2 , __A : Optional[Any]=5 , __A : Any=4 , __A : Any=3_7 , __A : Optional[int]="gelu" , __A : List[str]=0.1 , __A : int=0.1 , __A : Dict=1_0 , __A : str=0.0_2 , __A : str="divided_space_time" , __A : List[Any]=None , ):
snake_case__ : List[str] = parent
snake_case__ : str = batch_size
snake_case__ : Optional[Any] = image_size
snake_case__ : Dict = num_channels
snake_case__ : List[str] = patch_size
snake_case__ : int = num_frames
snake_case__ : List[str] = is_training
snake_case__ : Union[str, Any] = use_labels
snake_case__ : int = hidden_size
snake_case__ : List[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : Tuple = hidden_act
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : List[str] = attention_type
snake_case__ : int = initializer_range
snake_case__ : List[str] = scope
snake_case__ : Any = num_labels
# in TimeSformer, the number of spatial tokens equals num_frames * num_patches per frame + 1 CLS token
snake_case__ : List[Any] = (image_size // patch_size) ** 2
snake_case__ : str = (num_frames) * self.num_patches_per_frame + 1
def _lowercase ( self : Any ):
snake_case__ : Optional[int] = floats_tensor(
[self.batch_size, self.num_frames, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Any = None
if self.use_labels:
snake_case__ : str = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Optional[Any] = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Any ):
snake_case__ : Tuple = TimesformerConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , num_frames=self.num_frames , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , initializer_range=self.initializer_range , attention_type=self.attention_type , )
snake_case__ : List[Any] = self.num_labels
return config
def _lowercase ( self : Dict , __A : Tuple , __A : str , __A : Union[str, Any] ):
snake_case__ : Any = TimesformerModel(config=__A )
model.to(__A )
model.eval()
snake_case__ : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Dict , __A : List[Any] , __A : Tuple , __A : str ):
snake_case__ : Any = TimesformerForVideoClassification(__A )
model.to(__A )
model.eval()
snake_case__ : Dict = model(__A )
# verify the logits shape
snake_case__ : Union[str, Any] = torch.Size((self.batch_size, self.num_labels) )
self.parent.assertEqual(result.logits.shape , __A )
def _lowercase ( self : Tuple ):
snake_case__ : Dict = self.prepare_config_and_inputs()
        snake_case__, snake_case__, snake_case__ = config_and_inputs
snake_case__ : Tuple = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (TimesformerModel, TimesformerForVideoClassification) if is_torch_available() else ()
a_ = (
{"feature-extraction": TimesformerModel, "video-classification": TimesformerForVideoClassification}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
def _lowercase ( self : Any ):
snake_case__ : Optional[int] = TimesformerModelTester(self )
snake_case__ : Tuple = ConfigTester(
self , config_class=__A , has_text_modality=__A , hidden_size=3_7 )
def _lowercase ( self : Dict , __A : Tuple , __A : Dict , __A : Union[str, Any]=False ):
snake_case__ : Optional[Any] = copy.deepcopy(__A )
if return_labels:
if model_class in get_values(__A ):
snake_case__ : List[str] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def _lowercase ( self : List[str] ):
self.config_tester.run_common_tests()
@unittest.skip(reason="TimeSformer does not use inputs_embeds" )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : Dict ):
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__A )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
snake_case__ : Tuple = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__A , nn.Linear ) )
def _lowercase ( self : Optional[Any] ):
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = model_class(__A )
snake_case__ : Tuple = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Union[str, Any] = [*signature.parameters.keys()]
snake_case__ : int = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_video_classification(*__A )
@slow
def _lowercase ( self : Union[str, Any] ):
for model_name in TIMESFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = TimesformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _lowercase ( self : Tuple ):
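        # checks attention outputs both when output_attentions is passed explicitly
        # and when it is enabled through the config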
if not self.has_attentions:
pass
else:
            snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = True
for model_class in self.all_model_classes:
snake_case__ : List[Any] = self.model_tester.seq_length
snake_case__ : Optional[int] = self.model_tester.num_frames
snake_case__ : int = True
snake_case__ : Optional[Any] = False
snake_case__ : Optional[int] = True
snake_case__ : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
snake_case__ : List[Any] = True
snake_case__ : List[str] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : int = model(**self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
            # attentions have shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
snake_case__ : Dict = len(__A )
# Check attention is always last and order is fine
snake_case__ : Union[str, Any] = True
snake_case__ : List[str] = True
snake_case__ : Optional[Any] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(__A , __A ) )
self.assertEqual(out_len + 1 , len(__A ) )
snake_case__ : str = outputs.attentions
self.assertEqual(len(__A ) , self.model_tester.num_hidden_layers )
            # attentions have shape (batch_size x num_frames) x num_heads x (num_patches per frame + 1) x (num_patches per frame + 1)
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len // num_frames + 1, seq_len // num_frames + 1] , )
def _lowercase ( self : Optional[int] ):
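        # hidden_states should contain num_hidden_layers + 1 entries (embeddings plus
        # one per layer), each with the expected (seq_length, hidden_size) shape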
def check_hidden_states_output(__A : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Optional[int] = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : str = model(**self._prepare_for_class(__A , __A ) )
snake_case__ : Dict = outputs.hidden_states
snake_case__ : Union[str, Any] = self.model_tester.num_hidden_layers + 1
self.assertEqual(len(__A ) , __A )
snake_case__ : Tuple = self.model_tester.seq_length
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : int = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : str = True
check_hidden_states_output(__A , __A , __A )
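# Downloads a short sample video from the hub and returns it as a list of frames.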
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : List[Any] = hf_hub_download(
repo_id="hf-internal-testing/spaghetti-video" , filename="eating_spaghetti.npy" , repo_type="dataset" )
snake_case__ : List[Any] = np.load(snake_case_ )
return list(snake_case_ )
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : int ):
# logits were tested with a different mean and std, so we use the same here
return (
VideoMAEImageProcessor(image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] )
if is_vision_available()
else None
)
@slow
def _lowercase ( self : Optional[int] ):
snake_case__ : Tuple = TimesformerForVideoClassification.from_pretrained("facebook/timesformer-base-finetuned-k400" ).to(
__A )
snake_case__ : Tuple = self.default_image_processor
snake_case__ : str = prepare_video()
snake_case__ : str = image_processor(video[:8] , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
snake_case__ : Optional[Any] = model(**__A )
# verify the logits
snake_case__ : Optional[Any] = torch.Size((1, 4_0_0) )
self.assertEqual(outputs.logits.shape , __A )
snake_case__ : Tuple = torch.tensor([-0.3_0_1_6, -0.7_7_1_3, -0.4_2_0_5] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
| 25 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
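# Walks a dotted key through the HF model, validates that the checkpoint tensor
# shape matches the target parameter, and copies the value in.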
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
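# Iterates over the fairseq state dict: conv feature-extractor weights go through
# load_conv_layer, everything else is routed via the MAPPING table above.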
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
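# Copies a single conv feature-extractor entry (conv weight/bias or layer norm
# weight/bias, depending on type_id) after checking tensor shapes.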
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
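# End-to-end conversion: build the HF config and model, load the fairseq
# checkpoint, port the weights, and save the result with save_pretrained.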
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
    snake_case__, snake_case__, snake_case__ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
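# example invocation (script name and paths below are hypothetical, for illustration only):
#   python convert_unispeech_sat_checkpoint.py \
#       --checkpoint_path /path/to/fairseq_checkpoint.pt \
#       --dict_path /path/to/dict \
#       --pytorch_dump_folder_path ./unispeech_sat_hf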
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 25 | 1 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
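# Image processor applying the standard pipeline (resize to shortest edge 256,
# center crop to 224x224, rescale by 1/255, normalize with ImageNet mean/std);
# each step can be toggled via its do_* flag.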
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = ["pixel_values"]
def __init__( self : Tuple , __A : bool = True , __A : Optional[Dict[str, int]] = None , __A : PILImageResampling = PILImageResampling.BILINEAR , __A : bool = True , __A : Dict[str, int] = None , __A : bool = True , __A : Union[int, float] = 1 / 2_5_5 , __A : bool = True , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , **__A : List[str] , ):
super().__init__(**__A )
snake_case__ : List[Any] = size if size is not None else {"shortest_edge": 2_5_6}
snake_case__ : List[str] = get_size_dict(__A , default_to_square=__A )
snake_case__ : str = crop_size if crop_size is not None else {"height": 2_2_4, "width": 2_2_4}
snake_case__ : Optional[int] = get_size_dict(__A )
snake_case__ : Dict = do_resize
snake_case__ : Union[str, Any] = size
snake_case__ : Optional[int] = resample
snake_case__ : Dict = do_center_crop
snake_case__ : int = crop_size
snake_case__ : str = do_rescale
snake_case__ : Optional[Any] = rescale_factor
snake_case__ : Dict = do_normalize
snake_case__ : List[Any] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
snake_case__ : Union[str, Any] = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _lowercase ( self : Tuple , __A : np.ndarray , __A : Dict[str, int] , __A : PILImageResampling = PILImageResampling.BICUBIC , __A : Optional[Union[str, ChannelDimension]] = None , **__A : int , ):
snake_case__ : Tuple = get_size_dict(__A , default_to_square=__A )
if "shortest_edge" not in size:
raise ValueError(f'''The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}''' )
snake_case__ : Dict = get_resize_output_image_size(__A , size=size["shortest_edge"] , default_to_square=__A )
return resize(__A , size=__A , resample=__A , data_format=__A , **__A )
def _lowercase ( self : str , __A : np.ndarray , __A : Dict[str, int] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str , ):
snake_case__ : Dict = get_size_dict(__A )
return center_crop(__A , size=(size["height"], size["width"]) , data_format=__A , **__A )
def _lowercase ( self : Dict , __A : np.ndarray , __A : float , __A : Optional[Union[str, ChannelDimension]] = None , **__A : str ):
return rescale(__A , scale=__A , data_format=__A , **__A )
def _lowercase ( self : Tuple , __A : np.ndarray , __A : Union[float, List[float]] , __A : Union[float, List[float]] , __A : Optional[Union[str, ChannelDimension]] = None , **__A : Optional[Any] , ):
return normalize(__A , mean=__A , std=__A , data_format=__A , **__A )
def _lowercase ( self : Tuple , __A : ImageInput , __A : Optional[bool] = None , __A : Dict[str, int] = None , __A : PILImageResampling = None , __A : bool = None , __A : Dict[str, int] = None , __A : Optional[bool] = None , __A : Optional[float] = None , __A : Optional[bool] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[float, List[float]]] = None , __A : Optional[Union[str, TensorType]] = None , __A : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__A : Optional[int] , ):
snake_case__ : Any = do_resize if do_resize is not None else self.do_resize
snake_case__ : Union[str, Any] = size if size is not None else self.size
snake_case__ : Optional[Any] = get_size_dict(__A , default_to_square=__A )
snake_case__ : Dict = resample if resample is not None else self.resample
snake_case__ : Optional[int] = do_center_crop if do_center_crop is not None else self.do_center_crop
snake_case__ : List[str] = crop_size if crop_size is not None else self.crop_size
snake_case__ : Optional[int] = get_size_dict(__A )
snake_case__ : int = do_rescale if do_rescale is not None else self.do_rescale
snake_case__ : Any = rescale_factor if rescale_factor is not None else self.rescale_factor
snake_case__ : List[str] = do_normalize if do_normalize is not None else self.do_normalize
snake_case__ : Optional[int] = image_mean if image_mean is not None else self.image_mean
snake_case__ : List[str] = image_std if image_std is not None else self.image_std
snake_case__ : Optional[Any] = make_list_of_images(__A )
if not valid_images(__A ):
raise ValueError(
"Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
"torch.Tensor, tf.Tensor or jax.ndarray." )
if do_resize and size is None:
raise ValueError("Size must be specified if do_resize is True." )
if do_center_crop and crop_size is None:
raise ValueError("Crop size must be specified if do_center_crop is True." )
if do_rescale and rescale_factor is None:
raise ValueError("Rescale factor must be specified if do_rescale is True." )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("Image mean and std must be specified if do_normalize is True." )
# All transformations expect numpy arrays.
snake_case__ : Union[str, Any] = [to_numpy_array(__A ) for image in images]
if do_resize:
snake_case__ : int = [self.resize(image=__A , size=__A , resample=__A ) for image in images]
if do_center_crop:
snake_case__ : Dict = [self.center_crop(image=__A , size=__A ) for image in images]
if do_rescale:
snake_case__ : str = [self.rescale(image=__A , scale=__A ) for image in images]
if do_normalize:
snake_case__ : Tuple = [self.normalize(image=__A , mean=__A , std=__A ) for image in images]
snake_case__ : str = [to_channel_dimension_format(__A , __A ) for image in images]
snake_case__ : List[Any] = {"pixel_values": images}
return BatchFeature(data=__A , tensor_type=__A )
| 25 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
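# Builds a full input dict for the tests, deriving attention masks from pad tokens
# and defaulting all head masks to ones when they are not provided.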
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
        snake_case__, snake_case__ = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
        snake_case__, snake_case__ = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
            snake_case__, snake_case__ = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
        snake_case__, snake_case__ = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
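# Convenience wrapper turning nested lists of token ids into long tensors.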
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
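# The import structure is declared up front; the heavy framework-specific modules
# are only loaded on first access through _LazyModule at the bottom of the file.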
__lowerCamelCase : List[str] = {
"""configuration_funnel""": ["""FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FunnelConfig"""],
"""convert_funnel_original_tf_checkpoint_to_pytorch""": [],
"""tokenization_funnel""": ["""FunnelTokenizer"""],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""FunnelTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FunnelBaseModel""",
"""FunnelForMaskedLM""",
"""FunnelForMultipleChoice""",
"""FunnelForPreTraining""",
"""FunnelForQuestionAnswering""",
"""FunnelForSequenceClassification""",
"""FunnelForTokenClassification""",
"""FunnelModel""",
"""FunnelPreTrainedModel""",
"""load_tf_weights_in_funnel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFFunnelBaseModel""",
"""TFFunnelForMaskedLM""",
"""TFFunnelForMultipleChoice""",
"""TFFunnelForPreTraining""",
"""TFFunnelForQuestionAnswering""",
"""TFFunnelForSequenceClassification""",
"""TFFunnelForTokenClassification""",
"""TFFunnelModel""",
"""TFFunnelPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_funnel import FUNNEL_PRETRAINED_CONFIG_ARCHIVE_MAP, FunnelConfig
from .tokenization_funnel import FunnelTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_funnel_fast import FunnelTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_funnel import (
FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
FunnelBaseModel,
FunnelForMaskedLM,
FunnelForMultipleChoice,
FunnelForPreTraining,
FunnelForQuestionAnswering,
FunnelForSequenceClassification,
FunnelForTokenClassification,
FunnelModel,
FunnelPreTrainedModel,
load_tf_weights_in_funnel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_funnel import (
TF_FUNNEL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
TFFunnelPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
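# Collects the requested partitions in the given order and returns the expected
# (row_id, row_dict) pairs that the Spark examples iterable should yield.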
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : Optional[int] = []
for part_id in partition_order:
snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 )
snake_case__ : Any = Spark(snake_case_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 )
snake_case__ : Optional[Any] = [1, 0]
snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions.
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
        snake_case__, snake_case__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[int] = spark.range(10 ).repartition(1 )
snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
        snake_case__ : Union[str, Any] = lambda snake_case_ : snake_case_.reverse()
snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case_ ):
            snake_case__, snake_case__ = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
        snake_case__, snake_case__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
        snake_case__, snake_case__ = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(100 ).repartition(1 )
snake_case__ : Union[str, Any] = Spark(snake_case_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 25 | 1 |
from transformers import BertTokenizer, EncoderDecoderModel, SeqaSeqTrainer, SeqaSeqTrainingArguments
from transformers.testing_utils import TestCasePlus, require_torch, slow
from transformers.utils import is_datasets_available
if is_datasets_available():
import datasets
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
@slow
@require_torch
def _lowercase ( self : Any ):
snake_case__ : List[str] = EncoderDecoderModel.from_encoder_decoder_pretrained("prajjwal1/bert-tiny" , "prajjwal1/bert-tiny" )
snake_case__ : Optional[Any] = BertTokenizer.from_pretrained("bert-base-uncased" )
snake_case__ : Any = bertabert.config.encoder.vocab_size
snake_case__ : List[str] = tokenizer.sep_token_id
snake_case__ : Any = tokenizer.cls_token_id
snake_case__ : Tuple = 1_2_8
snake_case__ : str = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="train[:1%]" )
snake_case__ : List[str] = datasets.load_dataset("cnn_dailymail" , "3.0.0" , split="validation[:1%]" )
snake_case__ : Optional[int] = train_dataset.select(range(3_2 ) )
snake_case__ : Tuple = val_dataset.select(range(1_6 ) )
snake_case__ : List[str] = 4
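        # tokenize articles/highlights to fixed lengths and replace pad tokens in the
        # labels with -100 so they are ignored by the loss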
def _map_to_encoder_decoder_inputs(__A : Optional[int] ):
# Tokenizer will automatically set [BOS] <text> [EOS]
snake_case__ : Dict = tokenizer(batch["article"] , padding="max_length" , truncation=__A , max_length=5_1_2 )
snake_case__ : List[str] = tokenizer(batch["highlights"] , padding="max_length" , truncation=__A , max_length=1_2_8 )
snake_case__ : str = inputs.input_ids
snake_case__ : int = inputs.attention_mask
snake_case__ : Tuple = outputs.input_ids
            batch["labels"] = outputs.input_ids.copy()
            batch["labels"] = [
[-1_0_0 if token == tokenizer.pad_token_id else token for token in labels] for labels in batch["labels"]
]
snake_case__ : List[Any] = outputs.attention_mask
assert all(len(__A ) == 5_1_2 for x in inputs.input_ids )
assert all(len(__A ) == 1_2_8 for x in outputs.input_ids )
return batch
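        # exact-match accuracy over the decoded prediction and label strings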
def _compute_metrics(__A : Union[str, Any] ):
snake_case__ : Dict = pred.label_ids
snake_case__ : Any = pred.predictions
# all unnecessary tokens are removed
snake_case__ : str = tokenizer.batch_decode(__A , skip_special_tokens=__A )
snake_case__ : Any = tokenizer.batch_decode(__A , skip_special_tokens=__A )
snake_case__ : str = sum([int(pred_str[i] == label_str[i] ) for i in range(len(__A ) )] ) / len(__A )
return {"accuracy": accuracy}
# map train dataset
snake_case__ : Tuple = train_dataset.map(
_map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=["article", "highlights"] , )
train_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
# same for validation dataset
snake_case__ : int = val_dataset.map(
_map_to_encoder_decoder_inputs , batched=__A , batch_size=__A , remove_columns=["article", "highlights"] , )
val_dataset.set_format(
type="torch" , columns=["input_ids", "attention_mask", "decoder_input_ids", "decoder_attention_mask", "labels"] , )
snake_case__ : Dict = self.get_auto_remove_tmp_dir()
snake_case__ : List[str] = SeqaSeqTrainingArguments(
output_dir=__A , per_device_train_batch_size=__A , per_device_eval_batch_size=__A , predict_with_generate=__A , evaluation_strategy="steps" , do_train=__A , do_eval=__A , warmup_steps=0 , eval_steps=2 , logging_steps=2 , )
# instantiate trainer
snake_case__ : Dict = SeqaSeqTrainer(
model=__A , args=__A , compute_metrics=_compute_metrics , train_dataset=__A , eval_dataset=__A , tokenizer=__A , )
# start training
trainer.train()
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
    import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 1 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
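# A minimal, self-contained sketch of the dotted-key traversal the loader above
# relies on: descend through nested modules one attribute per path segment,
# check shapes, then copy the tensor in place. The helper name and toy model
# below are illustrative, not part of the conversion script.
import torch
import torch.nn as nn
def assign_by_dotted_key(model: nn.Module, key: str, value: torch.Tensor) -> None:
    pointer = model
    for attribute in key.split("."):
        pointer = getattr(pointer, attribute)  # e.g. "0" -> child module, "weight" -> parameter
    if pointer.shape != value.shape:
        raise ValueError(f"Shape of {key} is {pointer.shape}, but should be {value.shape}")
    pointer.data = value
toy = nn.Sequential(nn.Linear(4, 4))
assign_by_dotted_key(toy, "0.weight", torch.zeros(4, 4))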
| 25 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
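# The try/except above is the optional-dependency pattern: when torch or
# transformers is missing, dummy objects are exported so the ImportError is
# raised at call time instead of import time. A standard-library sketch of the
# same gating idea (names are illustrative):
import importlib.util
def backend_available(name: str) -> bool:
    return importlib.util.find_spec(name) is not None
if backend_available("torch"):
    import torch
else:
    torch = None  # downstream code must check for None before use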
| 25 | 1 |
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple=None , snake_case_ : Optional[Any]=None ):
return field(default_factory=lambda: default , metadata=snake_case_ )
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = field(
metadata={"help": "The csv file to plot."} , )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."} , )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."} , )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "Disable logarithmic scale when plotting"} , )
a_ = field(
default=UpperCamelCase_ , metadata={
"help": "Whether the csv file has training results or inference results. Defaults to inference results."
} , )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."} , )
a_ = list_field(
default=UpperCamelCase_ , metadata={"help": "List of model names that are used instead of the ones in the csv file."} )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] ):
try:
int(snake_case_ )
return True
except ValueError:
return False
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
try:
float(snake_case_ )
return True
except ValueError:
return False
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : str , __A : List[str] ):
snake_case__ : List[str] = args
snake_case__ : Any = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}} )
with open(self.args.csv_file , newline="" ) as csv_file:
snake_case__ : Tuple = csv.DictReader(__A )
for row in reader:
snake_case__ : List[str] = row["model"]
self.result_dict[model_name]["bsz"].append(int(row["batch_size"] ) )
self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"] ) )
if can_convert_to_int(row["result"] ):
# value is not None
snake_case__ : Tuple = int(row["result"] )
elif can_convert_to_float(row["result"] ):
# value is not None
snake_case__ : Any = float(row["result"] )
def _lowercase ( self : Any ):
snake_case__, snake_case__ : Optional[Any] = plt.subplots()
snake_case__ : Tuple = "Time usage" if self.args.is_time else "Memory usage"
snake_case__ : str = title_str + " for training" if self.args.is_train else title_str + " for inference"
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("log" )
ax.set_yscale("log" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
snake_case__ : Tuple = sorted(set(self.result_dict[model_name]["bsz"] ) )
snake_case__ : Optional[int] = sorted(set(self.result_dict[model_name]["seq_len"] ) )
snake_case__ : str = self.result_dict[model_name]["result"]
((snake_case__), (snake_case__)) : List[str] = (
(batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
)
snake_case__ : Tuple = (
model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
)
for inner_loop_value in inner_loop_array:
if self.args.plot_along_batch:
snake_case__ : Optional[Any] = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results] , dtype=np.floataa , )
else:
snake_case__ : str = np.asarray(
[results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results] , dtype=np.floataa , )
((snake_case__), (snake_case__)) : str = (
("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
)
snake_case__ : Optional[int] = np.asarray(__A , __A )[: len(__A )]
plt.scatter(
__A , __A , label=f'''{label_model_name} - {inner_loop_label}: {inner_loop_value}''' )
plt.plot(__A , __A , "--" )
title_str += f''' {label_model_name} vs.'''
snake_case__ : str = title_str[:-4]
snake_case__ : Optional[int] = "Time in s" if self.args.is_time else "Memory in MB"
# plot
plt.title(__A )
plt.xlabel(__A )
plt.ylabel(__A )
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Union[str, Any] = HfArgumentParser(snake_case_ )
snake_case__ : Dict = parser.parse_args_into_dataclasses()[0]
snake_case__ : Optional[Any] = Plot(args=snake_case_ )
plot.plot()
if __name__ == "__main__":
main()
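# Hypothetical input illustrating the CSV layout the Plot class above consumes:
# one row per (model, batch_size, sequence_length) with a numeric `result`
# column, exactly the fields the DictReader loop reads.
import csv
import io
sample = """model,batch_size,sequence_length,result
bert-base,8,128,512
bert-base,8,256,1024
"""
rows = list(csv.DictReader(io.StringIO(sample)))
assert rows[0]["model"] == "bert-base" and int(rows[0]["result"]) == 512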
| 25 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
def SCREAMING_SNAKE_CASE ( snake_case_ : dict ):
return (data["data"], data["target"])
def SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ):
snake_case__ : Optional[int] = XGBClassifier()
classifier.fit(snake_case_ , snake_case_ )
return classifier
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = load_iris()
snake_case__, snake_case__ : str = data_handling(snake_case_ )
snake_case__, snake_case__, snake_case__, snake_case__ : int = train_test_split(
snake_case_ , snake_case_ , test_size=0.25 )
snake_case__ : Dict = iris["target_names"]
# Create an XGBoost Classifier from the training data
snake_case__ : Dict = xgboost(snake_case_ , snake_case_ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
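# A minimal follow-up sketch: reuse the same pipeline for plain accuracy
# scoring instead of a confusion matrix. Only standard scikit-learn/xgboost
# calls; the split size mirrors the script above.
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
features, targets = load_iris(return_X_y=True)
x_train, x_test, y_train, y_test = train_test_split(features, targets, test_size=0.25)
clf = XGBClassifier().fit(x_train, y_train)
print("test accuracy:", clf.score(x_test, y_test))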
| 25 | 1 |
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Tuple ):
snake_case__ : Union[str, Any] = {}
def _lowercase ( self : Union[str, Any] ):
print(self.vertex )
for i in self.vertex:
print(__A , " -> " , " -> ".join([str(__A ) for j in self.vertex[i]] ) )
def _lowercase ( self : List[str] , __A : int , __A : int ):
        # check if the vertex is already present
if from_vertex in self.vertex:
self.vertex[from_vertex].append(__A )
else:
# else make a new vertex
snake_case__ : Optional[int] = [to_vertex]
def _lowercase ( self : Optional[int] ):
# visited array for storing already visited nodes
snake_case__ : Optional[int] = [False] * len(self.vertex )
# call the recursive helper function
for i in range(len(self.vertex ) ):
if not visited[i]:
self.dfs_recursive(__A , __A )
def _lowercase ( self : Any , __A : int , __A : list ):
# mark start vertex as visited
snake_case__ : Tuple = True
print(__A , end=" " )
# Recur for all the vertices that are adjacent to this node
for i in self.vertex:
if not visited[i]:
self.dfs_recursive(__A , __A )
if __name__ == "__main__":
__lowerCamelCase : str = Graph()
g.add_edge(0, 1)
g.add_edge(0, 2)
g.add_edge(1, 2)
g.add_edge(2, 0)
g.add_edge(2, 3)
g.add_edge(3, 3)
g.print_graph()
print("""DFS:""")
g.dfs()
# OUTPUT:
# 0 -> 1 -> 2
# 1 -> 2
# 2 -> 0 -> 3
# 3 -> 3
# DFS:
# 0 1 2 3
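# For contrast with the recursive DFS above, a minimal iterative version over
# the same adjacency-list shape, using an explicit stack. Neighbors are pushed
# in reverse so the visit order matches the recursive traversal.
def dfs_iterative(adjacency: dict, start: int) -> list:
    visited, stack, order = set(), [start], []
    while stack:
        node = stack.pop()
        if node in visited:
            continue
        visited.add(node)
        order.append(node)
        stack.extend(reversed(adjacency.get(node, [])))
    return order
assert dfs_iterative({0: [1, 2], 1: [2], 2: [0, 3], 3: [3]}, 0) == [0, 1, 2, 3]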
| 25 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ):
snake_case__ : Tuple = args.log_outputs
snake_case__ : Union[str, Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
snake_case__ : List[str] = load_metric("wer" )
snake_case__ : List[str] = load_metric("cer" )
# compute metrics
snake_case__ : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
snake_case__ : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
snake_case__ : Dict = F'''WER: {wer_result}\nCER: {cer_result}'''
print(snake_case_ )
with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f:
f.write(snake_case_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
snake_case__ : Union[str, Any] = F'''log_{dataset_id}_predictions.txt'''
snake_case__ : int = F'''log_{dataset_id}_targets.txt'''
with open(snake_case_ , "w" ) as p, open(snake_case_ , "w" ) as t:
# mapping function to write output
def write_to_file(snake_case_ : Union[str, Any] , snake_case_ : Any ):
p.write(F'''{i}''' + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(F'''{i}''' + "\n" )
t.write(batch["target"] + "\n" )
result.map(snake_case_ , with_indices=snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
snake_case__ : Optional[int] = re.sub(snake_case_ , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
snake_case__ : Optional[Any] = ["\n\n", "\n", " ", " "]
for t in token_sequences_to_ignore:
snake_case__ : Optional[int] = " ".join(text.split(snake_case_ ) )
return text
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# load dataset
snake_case__ : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ )
    # for testing: only process the first few examples as a test
# dataset = dataset.select(range(10))
# load processor
snake_case__ : List[str] = AutoFeatureExtractor.from_pretrained(args.model_id )
snake_case__ : List[Any] = feature_extractor.sampling_rate
# resample audio
snake_case__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case_ ) )
# load eval pipeline
if args.device is None:
snake_case__ : int = 0 if torch.cuda.is_available() else -1
snake_case__ : List[str] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case_ : Any ):
snake_case__ : Union[str, Any] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
snake_case__ : Optional[int] = prediction["text"]
snake_case__ : Optional[Any] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
snake_case__ : Any = dataset.map(snake_case_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case_ , snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
    parser.add_argument(
        """--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. If unset, the audio is not chunked."""
    )
    parser.add_argument(
        """--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks in seconds. If unset, the pipeline's default stride is used."""
    )
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__lowerCamelCase : str = parser.parse_args()
main(args)
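# A standalone check of the normalization strategy used above: strip the
# ignored characters, lowercase, and collapse whitespace. The character class
# is the one from normalize_text.
import re
chars_to_ignore = "[,?.!\\-\\;\\:\"“%‘”�—’…–]"
cleaned = re.sub(chars_to_ignore, "", "Hello, World!".lower())
cleaned = " ".join(cleaned.split())
assert cleaned == "hello world"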
| 25 | 1 |
import unittest
from transformers import MPNetConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : int , __A : Union[str, Any]=1_3 , __A : int=7 , __A : Optional[int]=True , __A : str=True , __A : Dict=False , __A : Tuple=True , __A : List[str]=9_9 , __A : Tuple=6_4 , __A : int=5 , __A : Any=4 , __A : List[str]=6_4 , __A : int="gelu" , __A : Any=0.1 , __A : str=0.1 , __A : str=5_1_2 , __A : str=1_6 , __A : Tuple=2 , __A : Any=0.0_2 , __A : str=3 , __A : Optional[int]=4 , __A : Tuple=None , ):
snake_case__ : int = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : int = is_training
snake_case__ : Tuple = use_input_mask
snake_case__ : Optional[int] = use_token_type_ids
snake_case__ : str = use_labels
snake_case__ : Optional[Any] = vocab_size
snake_case__ : List[Any] = hidden_size
snake_case__ : Tuple = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Dict = hidden_dropout_prob
snake_case__ : str = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : Optional[Any] = type_vocab_size
snake_case__ : Dict = type_sequence_label_size
snake_case__ : str = initializer_range
snake_case__ : List[str] = num_labels
snake_case__ : int = num_choices
snake_case__ : Dict = scope
def _lowercase ( self : Any ):
return MPNetConfig.from_pretrained("microsoft/mpnet-base" )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = None
if self.use_input_mask:
snake_case__ : Dict = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Optional[Any] = None
snake_case__ : Tuple = None
snake_case__ : List[str] = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Dict = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : str ):
return MPNetConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowercase ( self : str , __A : Dict , __A : str , __A : Any , __A : int , __A : List[Any] , __A : Optional[Any] ):
snake_case__ : Any = MPNetModel(config=__A )
model.to(__A )
model.eval()
snake_case__ : Optional[Any] = model(__A , __A )
snake_case__ : Dict = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : Tuple , __A : Union[str, Any] , __A : List[str] , __A : Union[str, Any] , __A : List[Any] , __A : int , __A : Optional[Any] ):
snake_case__ : Any = MPNetForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
snake_case__ : Union[str, Any] = model(
__A , attention_mask=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Tuple , __A : int , __A : Tuple , __A : int , __A : str , __A : List[str] , __A : str ):
snake_case__ : str = self.num_labels
snake_case__ : Tuple = MPNetForSequenceClassification(__A )
model.to(__A )
model.eval()
snake_case__ : str = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Any , __A : List[str] , __A : Any , __A : Union[str, Any] , __A : Any , __A : Optional[Any] , __A : List[Any] ):
snake_case__ : int = self.num_choices
snake_case__ : Tuple = MPNetForMultipleChoice(config=__A )
model.to(__A )
model.eval()
snake_case__ : Optional[Any] = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : str = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : int = model(
__A , attention_mask=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : str , __A : Union[str, Any] , __A : List[str] , __A : Optional[int] , __A : List[Any] , __A : Union[str, Any] , __A : List[str] ):
snake_case__ : Tuple = self.num_labels
snake_case__ : List[str] = MPNetForTokenClassification(config=__A )
model.to(__A )
model.eval()
snake_case__ : Any = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Any ):
snake_case__ : Optional[int] = self.prepare_config_and_inputs()
((snake_case__), (snake_case__), (snake_case__), (snake_case__), (snake_case__), (snake_case__)) : Tuple = config_and_inputs
snake_case__ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MPNetForMaskedLM,
MPNetForMultipleChoice,
MPNetForQuestionAnswering,
MPNetForSequenceClassification,
MPNetForTokenClassification,
MPNetModel,
)
if is_torch_available()
else ()
)
a_ = (
{
"feature-extraction": MPNetModel,
"fill-mask": MPNetForMaskedLM,
"question-answering": MPNetForQuestionAnswering,
"text-classification": MPNetForSequenceClassification,
"token-classification": MPNetForTokenClassification,
"zero-shot": MPNetForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = True
def _lowercase ( self : List[str] ):
snake_case__ : Any = MPNetModelTester(self )
snake_case__ : Tuple = ConfigTester(self , config_class=__A , hidden_size=3_7 )
def _lowercase ( self : Tuple ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[int] ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_model(*__A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_sequence_classification(*__A )
def _lowercase ( self : Any ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_multiple_choice(*__A )
def _lowercase ( self : List[Any] ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_token_classification(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mpnet_for_question_answering(*__A )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : int ):
snake_case__ : List[Any] = MPNetModel.from_pretrained("microsoft/mpnet-base" )
snake_case__ : Optional[int] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
snake_case__ : List[Any] = model(__A )[0]
snake_case__ : Optional[int] = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __A )
snake_case__ : Any = torch.tensor(
[[[-0.0_5_5_0, 0.1_9_4_3, -0.0_7_4_0], [-0.0_5_6_2, 0.2_2_1_1, -0.0_5_7_9], [-0.0_4_3_7, 0.3_3_3_7, -0.0_6_4_1]]] )
# compare the actual values for a slice.
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=1e-4 ) )
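# The multiple-choice checks above hinge on expanding (batch, seq) inputs to
# (batch, num_choices, seq) before the forward pass. The reshaping step in
# isolation (sizes are illustrative):
import torch
batch, num_choices, seq = 2, 4, 11
input_ids = torch.randint(0, 100, (batch, seq))
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch, num_choices, seq)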
| 25 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a_ = Features({"text": Value("string" )} )
a_ = Features({"labels": ClassLabel} )
a_ = "text"
a_ = "labels"
def _lowercase ( self : Tuple , __A : List[Any] ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
snake_case__ : Any = copy.deepcopy(self )
snake_case__ : Optional[Any] = self.label_schema.copy()
snake_case__ : List[str] = features[self.label_column]
snake_case__ : Dict = label_schema
return task_template
@property
def _lowercase ( self : Tuple ):
return {
self.text_column: "text",
self.label_column: "labels",
}
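# The align-with-features logic above swaps the generic ClassLabel placeholder
# for the dataset's concrete ClassLabel. The schema update in isolation (label
# names are illustrative):
from datasets import ClassLabel, Features
label_schema = Features({"labels": ClassLabel})
concrete = label_schema.copy()
concrete["labels"] = ClassLabel(names=["neg", "pos"])
assert concrete["labels"].num_classes == 2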
| 25 | 1 |
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""microsoft/deberta-v2-xlarge""": """https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xxlarge""": """https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json""",
"""microsoft/deberta-v2-xlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json"""
),
"""microsoft/deberta-v2-xxlarge-mnli""": (
"""https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "deberta-v2"
def __init__( self : Dict , __A : Dict=1_2_8_1_0_0 , __A : Any=1_5_3_6 , __A : Union[str, Any]=2_4 , __A : Optional[int]=2_4 , __A : Any=6_1_4_4 , __A : List[str]="gelu" , __A : List[str]=0.1 , __A : int=0.1 , __A : Dict=5_1_2 , __A : Optional[int]=0 , __A : Union[str, Any]=0.0_2 , __A : str=1e-7 , __A : Union[str, Any]=False , __A : str=-1 , __A : str=0 , __A : Optional[Any]=True , __A : Union[str, Any]=None , __A : Any=0 , __A : Any="gelu" , **__A : Optional[int] , ):
super().__init__(**__A )
snake_case__ : Optional[int] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : Optional[int] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : Union[str, Any] = hidden_act
snake_case__ : Any = hidden_dropout_prob
snake_case__ : Tuple = attention_probs_dropout_prob
snake_case__ : Tuple = max_position_embeddings
snake_case__ : List[Any] = type_vocab_size
snake_case__ : int = initializer_range
snake_case__ : List[Any] = relative_attention
snake_case__ : Dict = max_relative_positions
snake_case__ : Any = pad_token_id
snake_case__ : Any = position_biased_input
# Backwards compatibility
if type(__A ) == str:
snake_case__ : List[Any] = [x.strip() for x in pos_att_type.lower().split("|" )]
snake_case__ : List[str] = pos_att_type
snake_case__ : Tuple = vocab_size
snake_case__ : List[str] = layer_norm_eps
snake_case__ : List[str] = kwargs.get("pooler_hidden_size" , __A )
snake_case__ : Tuple = pooler_dropout
snake_case__ : str = pooler_hidden_act
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
@property
def _lowercase ( self : str ):
if self.task == "multiple-choice":
snake_case__ : int = {0: "batch", 1: "choice", 2: "sequence"}
else:
snake_case__ : List[Any] = {0: "batch", 1: "sequence"}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("input_ids", dynamic_axis), ("attention_mask", dynamic_axis), ("token_type_ids", dynamic_axis)] )
else:
return OrderedDict([("input_ids", dynamic_axis), ("attention_mask", dynamic_axis)] )
@property
def _lowercase ( self : Dict ):
return 1_2
def _lowercase ( self : List[Any] , __A : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , __A : int = -1 , __A : int = -1 , __A : int = -1 , __A : bool = False , __A : Optional["TensorType"] = None , __A : int = 3 , __A : int = 4_0 , __A : int = 4_0 , __A : "PreTrainedTokenizerBase" = None , ):
snake_case__ : int = super().generate_dummy_inputs(preprocessor=__A , framework=__A )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
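# The backwards-compatibility branch in the config above parses "p2c|c2p"-style
# strings into a list. The parsing step in isolation:
pos_att_type = "p2c|c2p"
if isinstance(pos_att_type, str):
    pos_att_type = [x.strip() for x in pos_att_type.lower().split("|")]
assert pos_att_type == ["p2c", "c2p"]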
| 25 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip_vision_model"
def __init__( self : List[Any] , __A : Dict=1_4_0_8 , __A : Tuple=6_1_4_4 , __A : str=3_9 , __A : int=1_6 , __A : str=2_2_4 , __A : Any=1_4 , __A : Dict="gelu" , __A : List[Any]=1e-6 , __A : Any=0.0 , __A : List[Any]=1e-1_0 , __A : Union[str, Any]=True , **__A : Tuple , ):
super().__init__(**__A )
snake_case__ : List[str] = hidden_size
snake_case__ : Optional[int] = intermediate_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : str = patch_size
snake_case__ : int = image_size
snake_case__ : int = initializer_range
snake_case__ : Optional[int] = attention_dropout
snake_case__ : str = layer_norm_eps
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Tuple = qkv_bias
@classmethod
def _lowercase ( cls : List[str] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : str = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : Union[str, Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip_qformer"
def __init__( self : Any , __A : Union[str, Any]=3_0_5_2_2 , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=1_2 , __A : Dict=3_0_7_2 , __A : List[str]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_2 , __A : Optional[int]=0.0_2 , __A : List[str]=1e-1_2 , __A : Any=0 , __A : Optional[Any]="absolute" , __A : str=2 , __A : Any=1_4_0_8 , **__A : List[str] , ):
super().__init__(pad_token_id=__A , **__A )
snake_case__ : Dict = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = hidden_act
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : int = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : Dict = cross_attention_frequency
snake_case__ : List[str] = encoder_hidden_size
@classmethod
def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Optional[int] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : Tuple = cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : List[Any] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip"
a_ = True
def __init__( self : List[str] , __A : Optional[Any]=None , __A : Tuple=None , __A : Optional[int]=None , __A : Optional[Any]=3_2 , **__A : Optional[int] ):
super().__init__(**__A )
if vision_config is None:
snake_case__ : Any = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
snake_case__ : Optional[Any] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
snake_case__ : Optional[int] = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
snake_case__ : List[Any] = InstructBlipVisionConfig(**__A )
snake_case__ : Union[str, Any] = InstructBlipQFormerConfig(**__A )
snake_case__ : Dict = text_config["model_type"] if "model_type" in text_config else "opt"
snake_case__ : List[Any] = CONFIG_MAPPING[text_model_type](**__A )
snake_case__ : Union[str, Any] = self.text_config.tie_word_embeddings
snake_case__ : Tuple = self.text_config.is_encoder_decoder
snake_case__ : str = num_query_tokens
snake_case__ : Dict = self.vision_config.hidden_size
snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case__ : int = 1.0
snake_case__ : Optional[int] = 0.0_2
@classmethod
def _lowercase ( cls : List[str] , __A : InstructBlipVisionConfig , __A : InstructBlipQFormerConfig , __A : PretrainedConfig , **__A : int , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = copy.deepcopy(self.__dict__ )
snake_case__ : Optional[Any] = self.vision_config.to_dict()
snake_case__ : List[str] = self.qformer_config.to_dict()
snake_case__ : List[Any] = self.text_config.to_dict()
snake_case__ : List[Any] = self.__class__.model_type
return output
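# A minimal sketch of the composite-config pattern used above: each sub-config
# serializes itself and the parent stitches the dicts together, which is what
# makes the to_dict()/from_dict round trip possible. Class names are
# illustrative.
class SubConfig:
    def __init__(self, hidden_size=8):
        self.hidden_size = hidden_size
    def to_dict(self):
        return dict(self.__dict__)
class ParentConfig:
    def __init__(self, vision_config=None):
        self.vision_config = SubConfig(**(vision_config or {}))
    def to_dict(self):
        return {"vision_config": self.vision_config.to_dict()}
assert ParentConfig({"hidden_size": 16}).to_dict() == {"vision_config": {"hidden_size": 16}}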
| 25 | 1 |
import os
import tempfile
import unittest
from transformers import DistilBertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
DistilBertModel,
)
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : List[str] , __A : Dict=1_3 , __A : str=7 , __A : int=True , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : Tuple=True , __A : Any=9_9 , __A : Any=3_2 , __A : Dict=5 , __A : Optional[int]=4 , __A : Tuple=3_7 , __A : Dict="gelu" , __A : Dict=0.1 , __A : Any=0.1 , __A : List[Any]=5_1_2 , __A : Union[str, Any]=1_6 , __A : Any=2 , __A : List[str]=0.0_2 , __A : Optional[Any]=3 , __A : Union[str, Any]=4 , __A : Tuple=None , ):
snake_case__ : Tuple = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : Optional[Any] = seq_length
snake_case__ : str = is_training
snake_case__ : Any = use_input_mask
snake_case__ : Tuple = use_token_type_ids
snake_case__ : Optional[int] = use_labels
snake_case__ : Optional[int] = vocab_size
snake_case__ : Any = hidden_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : Optional[Any] = num_attention_heads
snake_case__ : Dict = intermediate_size
snake_case__ : Dict = hidden_act
snake_case__ : List[str] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : Optional[int] = max_position_embeddings
snake_case__ : int = type_vocab_size
snake_case__ : int = type_sequence_label_size
snake_case__ : Dict = initializer_range
snake_case__ : List[Any] = num_labels
snake_case__ : str = num_choices
snake_case__ : Tuple = scope
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Dict = None
if self.use_input_mask:
snake_case__ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : Union[str, Any] = None
snake_case__ : Tuple = None
snake_case__ : Union[str, Any] = None
if self.use_labels:
snake_case__ : Union[str, Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Optional[int] = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Optional[int] = self.get_config()
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Optional[Any] ):
return DistilBertConfig(
vocab_size=self.vocab_size , dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , hidden_dim=self.intermediate_size , hidden_act=self.hidden_act , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , )
def _lowercase ( self : Any , __A : Optional[int] , __A : str , __A : List[str] , __A : Tuple , __A : Union[str, Any] , __A : Any ):
snake_case__ : List[Any] = DistilBertModel(config=__A )
model.to(__A )
model.eval()
snake_case__ : str = model(__A , __A )
snake_case__ : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _lowercase ( self : Optional[Any] , __A : List[str] , __A : int , __A : List[Any] , __A : str , __A : int , __A : int ):
snake_case__ : str = DistilBertForMaskedLM(config=__A )
model.to(__A )
model.eval()
snake_case__ : List[Any] = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : List[str] , __A : Tuple , __A : Optional[int] , __A : Optional[int] , __A : Tuple , __A : Dict , __A : Any ):
snake_case__ : str = DistilBertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
snake_case__ : Dict = model(
__A , attention_mask=__A , start_positions=__A , end_positions=__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : int , __A : str , __A : Optional[Any] , __A : Optional[Any] , __A : Optional[int] , __A : List[Any] , __A : Dict ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : Dict = DistilBertForSequenceClassification(__A )
model.to(__A )
model.eval()
snake_case__ : List[str] = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Tuple , __A : Dict , __A : Optional[Any] , __A : int , __A : List[str] , __A : Tuple , __A : Any ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : Tuple = DistilBertForTokenClassification(config=__A )
model.to(__A )
model.eval()
snake_case__ : Tuple = model(__A , attention_mask=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : List[Any] , __A : str , __A : str , __A : Dict , __A : Optional[Any] , __A : Any , __A : Tuple ):
snake_case__ : Union[str, Any] = self.num_choices
snake_case__ : List[Any] = DistilBertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
snake_case__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Optional[int] = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : List[Any] = model(
__A , attention_mask=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : str ):
snake_case__ : List[str] = self.prepare_config_and_inputs()
((snake_case__), (snake_case__), (snake_case__), (snake_case__), (snake_case__), (snake_case__)) : Optional[Any] = config_and_inputs
snake_case__ : List[str] = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
DistilBertModel,
DistilBertForMaskedLM,
DistilBertForMultipleChoice,
DistilBertForQuestionAnswering,
DistilBertForSequenceClassification,
DistilBertForTokenClassification,
)
if is_torch_available()
        else ()
)
a_ = (
{
"feature-extraction": DistilBertModel,
"fill-mask": DistilBertForMaskedLM,
"question-answering": DistilBertForQuestionAnswering,
"text-classification": DistilBertForSequenceClassification,
"token-classification": DistilBertForTokenClassification,
"zero-shot": DistilBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = True
a_ = True
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = DistilBertModelTester(self )
snake_case__ : str = ConfigTester(self , config_class=__A , dim=3_7 )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_model(*__A )
def _lowercase ( self : int ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_masked_lm(*__A )
def _lowercase ( self : List[str] ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_question_answering(*__A )
def _lowercase ( self : str ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_sequence_classification(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_token_classification(*__A )
def _lowercase ( self : str ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_distilbert_for_multiple_choice(*__A )
@slow
def _lowercase ( self : List[Any] ):
for model_name in DISTILBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : List[str] = DistilBertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@slow
@require_torch_gpu
def _lowercase ( self : Dict ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
            # DistilBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == DistilBertForMultipleChoice:
return
snake_case__ : Any = True
snake_case__ : int = model_class(config=__A )
snake_case__ : Tuple = self._prepare_for_class(__A , __A )
snake_case__ : Any = torch.jit.trace(
__A , (inputs_dict["input_ids"].to("cpu" ), inputs_dict["attention_mask"].to("cpu" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__A , os.path.join(__A , "traced_model.pt" ) )
snake_case__ : int = torch.jit.load(os.path.join(__A , "traced_model.pt" ) , map_location=__A )
loaded(inputs_dict["input_ids"].to(__A ) , inputs_dict["attention_mask"].to(__A ) )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = DistilBertModel.from_pretrained("distilbert-base-uncased" )
snake_case__ : int = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
snake_case__ : Optional[int] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case__ : Optional[Any] = model(__A , attention_mask=__A )[0]
snake_case__ : Any = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __A )
snake_case__ : Union[str, Any] = torch.tensor(
[[[-0.1_6_3_9, 0.3_2_9_9, 0.1_6_4_8], [-0.1_7_4_6, 0.3_2_8_9, 0.1_7_1_0], [-0.1_8_8_4, 0.3_3_5_7, 0.1_8_1_0]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
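# The JIT test above traces the model, saves it, and reloads it. The same
# round trip on a toy module, CPU-only so it runs anywhere:
import os
import tempfile
import torch
module = torch.nn.Linear(4, 2)
traced = torch.jit.trace(module, torch.randn(1, 4))
with tempfile.TemporaryDirectory() as tmp:
    path = os.path.join(tmp, "traced_model.pt")
    torch.jit.save(traced, path)
    loaded = torch.jit.load(path)
probe = torch.ones(1, 4)
assert torch.allclose(loaded(probe), module(probe))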
| 25 |
def SCREAMING_SNAKE_CASE ( snake_case_ : list ):
if len(snake_case_ ) <= 1:
return lst
snake_case__ : List[Any] = 1
while i < len(snake_case_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
snake_case__, snake_case__ : Tuple = lst[i], lst[i - 1]
i -= 1
if i == 0:
snake_case__ : Union[str, Any] = 1
return lst
if __name__ == "__main__":
__lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
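# A compact restatement of the algorithm above (the __main__ block binds it as
# gnome_sort): advance while the adjacent pair is ordered, otherwise swap and
# step back. Worst case is O(n^2), like insertion sort.
def gnome_sort_sketch(lst: list) -> list:
    i = 1
    while i < len(lst):
        if i == 0 or lst[i - 1] <= lst[i]:
            i += 1
        else:
            lst[i - 1], lst[i] = lst[i], lst[i - 1]
            i -= 1
    return lst
assert gnome_sort_sketch([3, 1, 2, 2, 0]) == [0, 1, 2, 2, 3]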
| 25 | 1 |
import pytest
__lowerCamelCase : Tuple = """__dummy_dataset1__"""
__lowerCamelCase : List[Any] = """
import json
import os
import datasets
REPO_URL = \"https://huggingface.co/datasets/albertvillanova/tests-raw-jsonl/resolve/main/\"
URLS = {\"train\": REPO_URL + \"wikiann-bn-train.jsonl\", \"validation\": REPO_URL + \"wikiann-bn-validation.jsonl\"}
class __DummyDataset1__(datasets.GeneratorBasedBuilder):
def _info(self):
features = datasets.Features(
{
\"tokens\": datasets.Sequence(datasets.Value(\"string\")),
\"ner_tags\": datasets.Sequence(
datasets.features.ClassLabel(
names=[
\"O\",
\"B-PER\",
\"I-PER\",
\"B-ORG\",
\"I-ORG\",
\"B-LOC\",
\"I-LOC\",
]
)
),
\"langs\": datasets.Sequence(datasets.Value(\"string\")),
\"spans\": datasets.Sequence(datasets.Value(\"string\")),
}
)
return datasets.DatasetInfo(features=features)
def _split_generators(self, dl_manager):
dl_path = dl_manager.download(URLS)
return [
datasets.SplitGenerator(datasets.Split.TRAIN, gen_kwargs={\"filepath\": dl_path[\"train\"]}),
datasets.SplitGenerator(datasets.Split.VALIDATION, gen_kwargs={\"filepath\": dl_path[\"validation\"]}),
]
def _generate_examples(self, filepath):
with open(filepath, \"r\", encoding=\"utf-8\") as f:
for i, line in enumerate(f):
yield i, json.loads(line)
"""
@pytest.fixture
def SCREAMING_SNAKE_CASE ( ):
return DATASET_LOADING_SCRIPT_NAME
@pytest.fixture
def SCREAMING_SNAKE_CASE ( ):
return DATASET_LOADING_SCRIPT_CODE
@pytest.fixture
def SCREAMING_SNAKE_CASE ( snake_case_ : List[str] , snake_case_ : int , snake_case_ : Union[str, Any] ):
snake_case__ : Any = dataset_loading_script_name
snake_case__ : Dict = tmp_path / "datasets" / script_name
script_dir.mkdir(parents=snake_case_ )
snake_case__ : Dict = script_dir / F'''{script_name}.py'''
with open(snake_case_ , "w" ) as f:
f.write(snake_case_ )
return str(snake_case_ )
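# The fixture above materializes a loading script under tmp_path for other
# tests to import. The same pattern in miniature with pytest's built-in
# tmp_path fixture (test and file names are illustrative):
def test_write_script(tmp_path):
    script_dir = tmp_path / "datasets" / "demo"
    script_dir.mkdir(parents=True)
    script_path = script_dir / "demo.py"
    script_path.write_text("print('hello')\n")
    assert script_path.read_text() == "print('hello')\n"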
| 25 |
from __future__ import annotations
import time
__lowerCamelCase : str = list[tuple[int, int]]
__lowerCamelCase : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
    [0, 1, 0, 0, 0, 0, 0], # 0's are free paths whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : int , __A : int , __A : int , __A : int , __A : Node | None ):
snake_case__ : Optional[int] = pos_x
snake_case__ : Dict = pos_y
snake_case__ : int = (pos_y, pos_x)
snake_case__ : Optional[int] = goal_x
snake_case__ : Tuple = goal_y
snake_case__ : str = parent
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : tuple[int, int] , __A : tuple[int, int] ):
snake_case__ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , __A )
snake_case__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __A )
snake_case__ : int = [self.start]
snake_case__ : Union[str, Any] = False
def _lowercase ( self : Dict ):
while self.node_queue:
snake_case__ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case__ : Optional[Any] = True
return self.retrace_path(__A )
snake_case__ : int = self.get_successors(__A )
for node in successors:
self.node_queue.append(__A )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Union[str, Any] , __A : Node ):
snake_case__ : str = []
for action in delta:
snake_case__ : str = parent.pos_x + action[1]
snake_case__ : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(__A ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__A , __A , self.target.pos_y , self.target.pos_x , __A ) )
return successors
def _lowercase ( self : Optional[Any] , __A : Node | None ):
snake_case__ : Tuple = node
snake_case__ : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case__ : Tuple = current_node.parent
path.reverse()
return path
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Dict , __A : str , __A : int ):
snake_case__ : str = BreadthFirstSearch(__A , __A )
snake_case__ : int = BreadthFirstSearch(__A , __A )
snake_case__ : Tuple = False
def _lowercase ( self : Optional[Any] ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case__ : Any = self.fwd_bfs.node_queue.pop(0 )
snake_case__ : List[str] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case__ : List[str] = True
return self.retrace_bidirectional_path(
__A , __A )
snake_case__ : Union[str, Any] = current_bwd_node
snake_case__ : Dict = current_fwd_node
snake_case__ : List[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(__A ),
self.bwd_bfs: self.bwd_bfs.get_successors(__A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Any , __A : Node , __A : Node ):
snake_case__ : List[str] = self.fwd_bfs.retrace_path(__A )
snake_case__ : Optional[Any] = self.bwd_bfs.retrace_path(__A )
bwd_path.pop()
bwd_path.reverse()
snake_case__ : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowerCamelCase : str = (0, 0)
__lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase : Any = time.time()
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal)
__lowerCamelCase : str = bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
__lowerCamelCase : Optional[Any] = time.time()
__lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
__lowerCamelCase : str = bd_bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 25 | 1 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : List[Any] = {
"""tanreinama/GPTSAN-2.8B-spout_is_uniform""": (
"""https://huggingface.co/tanreinama/GPTSAN-2.8B-spout_is_uniform/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "gptsan-japanese"
a_ = [
"past_key_values",
]
a_ = {
"hidden_size": "d_model",
"num_attention_heads": "num_heads",
"num_hidden_layers": "num_layers",
}
def __init__( self : Optional[Any] , __A : Optional[int]=3_6_0_0_0 , __A : Optional[int]=1_2_8_0 , __A : Optional[Any]=1_0_2_4 , __A : Optional[Any]=8_1_9_2 , __A : Optional[Any]=4_0_9_6 , __A : str=1_2_8 , __A : List[str]=1_0 , __A : Union[str, Any]=0 , __A : Optional[Any]=1_6 , __A : str=1_6 , __A : Optional[int]=1_2_8 , __A : List[str]=0.0 , __A : Any=1e-5 , __A : List[Any]=False , __A : Optional[Any]=0.0 , __A : List[str]="float32" , __A : List[str]=False , __A : int=False , __A : int=False , __A : Tuple=0.0_0_2 , __A : str=False , __A : str=True , __A : List[Any]=3_5_9_9_8 , __A : Any=3_5_9_9_5 , __A : Dict=3_5_9_9_9 , **__A : Optional[Any] , ):
snake_case__ : Dict = vocab_size
snake_case__ : Optional[Any] = max_position_embeddings
snake_case__ : Optional[int] = d_model
snake_case__ : Optional[Any] = d_ff
snake_case__ : int = d_ext
snake_case__ : Optional[int] = d_spout
snake_case__ : str = num_switch_layers
snake_case__ : int = num_ext_layers
snake_case__ : List[Any] = num_switch_layers + num_ext_layers
snake_case__ : Union[str, Any] = num_heads
snake_case__ : Any = num_experts
snake_case__ : Dict = expert_capacity
snake_case__ : List[Any] = dropout_rate
snake_case__ : Tuple = layer_norm_epsilon
snake_case__ : Union[str, Any] = router_bias
snake_case__ : str = router_jitter_noise
snake_case__ : List[str] = router_dtype
snake_case__ : Any = router_ignore_padding_tokens
snake_case__ : List[Any] = output_hidden_states
snake_case__ : Union[str, Any] = output_attentions
snake_case__ : Optional[int] = initializer_factor
snake_case__ : Dict = output_router_logits
snake_case__ : Union[str, Any] = use_cache
super().__init__(
separator_token_id=__A , pad_token_id=__A , eos_token_id=__A , **__A , )
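# The attribute_map above lets generic names like hidden_size resolve to the
# model-specific d_model. A standalone sketch of that aliasing (the class name
# is illustrative; transformers implements this inside PretrainedConfig):
class AliasedConfig:
    attribute_map = {"hidden_size": "d_model"}
    def __init__(self, d_model=1024):
        self.d_model = d_model
    def __getattr__(self, name):
        mapped = type(self).attribute_map.get(name)
        if mapped is not None:
            return getattr(self, mapped)
        raise AttributeError(name)
assert AliasedConfig().hidden_size == 1024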
| 25 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : str = min_resolution
snake_case__ : Tuple = max_resolution
snake_case__ : List[Any] = do_resize
snake_case__ : Dict = size
snake_case__ : List[str] = do_normalize
snake_case__ : Optional[int] = image_mean
snake_case__ : Optional[int] = image_std
snake_case__ : Any = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : int = do_pad
def _lowercase ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ):
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : Tuple = image.size
else:
snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case__ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case__ : List[Any] = self.size["shortest_edge"]
snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Dict = self.size["shortest_edge"]
snake_case__ : Dict = self.size["shortest_edge"]
else:
snake_case__ : str = []
for image in image_inputs:
snake_case__, snake_case__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Dict = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : Tuple = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : int ):
snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self )
@property
def _lowercase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Any ):
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : List[str] ):
snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : List[Any] ):
# prepare image and target
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : str ):
# prepare image, target and masks_path
snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 1 |
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
import torch
from datasets import load_dataset
from torchvision.transforms import Compose, Lambda, Normalize, RandomHorizontalFlip, RandomResizedCrop, ToTensor
from torchvision.transforms.functional import InterpolationMode
import transformers
from transformers import (
HfArgumentParser,
Trainer,
TrainingArguments,
ViTImageProcessor,
ViTMAEConfig,
ViTMAEForPreTraining,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
__lowerCamelCase : str = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risks.
check_min_version("""4.31.0""")
require_version("""datasets>=1.8.0""", """To fix: pip install -r examples/pytorch/image-pretraining/requirements.txt""")
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = field(
default="cifar10" , metadata={"help": "Name of a dataset from the datasets package"} )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "The configuration name of the dataset to use (via the datasets library)."} )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "The column name of the images in the files."} )
a_ = field(default=UpperCamelCase_ , metadata={"help": "A folder containing the training data."} )
a_ = field(default=UpperCamelCase_ , metadata={"help": "A folder containing the validation data."} )
a_ = field(
default=0.15 , metadata={"help": "Percent to split off of train for validation."} )
a_ = field(
default=UpperCamelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of training examples to this "
"value if set."
)
} , )
a_ = field(
default=UpperCamelCase_ , metadata={
"help": (
"For debugging purposes or quicker training, truncate the number of evaluation examples to this "
"value if set."
)
} , )
def _lowercase ( self : List[Any] ):
snake_case__ : int = {}
if self.train_dir is not None:
snake_case__ : Tuple = self.train_dir
if self.validation_dir is not None:
snake_case__ : Dict = self.validation_dir
snake_case__ : str = data_files if data_files else None
@dataclass
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
a_ = field(
default=UpperCamelCase_ , metadata={
"help": (
"The model checkpoint for weights initialization.Don't set if you want to train a model from scratch."
)
} , )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "Pretrained config name or path if not the same as model_name_or_path"} )
a_ = field(
default=UpperCamelCase_ , metadata={
"help": (
"Override some existing default config settings when a model is trained from scratch. Example: "
"n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index"
)
} , )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "Where do you want to store the pretrained models downloaded from s3"} )
a_ = field(
default="main" , metadata={"help": "The specific model version to use (can be a branch name, tag name or commit id)."} , )
a_ = field(default=UpperCamelCase_ , metadata={"help": "Name or path of preprocessor config."} )
a_ = field(
default=UpperCamelCase_ , metadata={
"help": (
"Will use the token generated when running `huggingface-cli login` (necessary to use this script "
"with private models)."
)
} , )
a_ = field(
default=0.75 , metadata={"help": "The ratio of the number of masked tokens in the input sequence."} )
a_ = field(
default=UpperCamelCase_ , metadata={"help": "Whether or not to train with normalized pixel values as target."} )
@dataclass
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = field(
default=1E-3 , metadata={"help": "Base learning rate: absolute_lr = base_lr * total_batch_size / 256."} )
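# Collate function for MAE pretraining: stack per-example pixel values into a single batch tensor (no labels are needed).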
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : Union[str, Any] = torch.stack([example["pixel_values"] for example in examples] )
return {"pixel_values": pixel_values}
def SCREAMING_SNAKE_CASE ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
snake_case__ : int = HfArgumentParser((ModelArguments, DataTrainingArguments, CustomTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(".json" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
snake_case__, snake_case__, snake_case__ : List[Any] = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
snake_case__, snake_case__, snake_case__ : Optional[Any] = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry("run_mae" , snake_case_ , snake_case_ )
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s" , datefmt="%m/%d/%Y %H:%M:%S" , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
snake_case__ : int = training_args.get_process_log_level()
logger.setLevel(snake_case_ )
transformers.utils.logging.set_verbosity(snake_case_ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'''Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}'''
+ F'''distributed training: {bool(training_args.local_rank != -1 )}, 16-bit training: {training_args.fpaa}''' )
logger.info(F'''Training/evaluation parameters {training_args}''' )
# Detecting last checkpoint.
snake_case__ : Any = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
snake_case__ : Optional[int] = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'''Output directory ({training_args.output_dir}) already exists and is not empty. '''
"Use --overwrite_output_dir to overcome." )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'''Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '''
"the `--output_dir` or add `--overwrite_output_dir` to train from scratch." )
# Initialize our dataset.
snake_case__ : List[Any] = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , data_files=data_args.data_files , cache_dir=model_args.cache_dir , use_auth_token=True if model_args.use_auth_token else None , )
# If we don't have a validation split, split off a percentage of train as validation.
snake_case__ : Dict = None if "validation" in ds.keys() else data_args.train_val_split
if isinstance(data_args.train_val_split , snake_case_ ) and data_args.train_val_split > 0.0:
snake_case__ : Optional[Any] = ds["train"].train_test_split(data_args.train_val_split )
snake_case__ : List[Any] = split["train"]
snake_case__ : List[str] = split["test"]
# Load pretrained model and image processor
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
snake_case__ : Tuple = {
"cache_dir": model_args.cache_dir,
"revision": model_args.model_revision,
"use_auth_token": True if model_args.use_auth_token else None,
}
if model_args.config_name:
snake_case__ : Optional[int] = ViTMAEConfig.from_pretrained(model_args.config_name , **snake_case_ )
elif model_args.model_name_or_path:
snake_case__ : Union[str, Any] = ViTMAEConfig.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
snake_case__ : List[Any] = ViTMAEConfig()
logger.warning("You are instantiating a new config instance from scratch." )
if model_args.config_overrides is not None:
logger.info(F'''Overriding config: {model_args.config_overrides}''' )
config.update_from_string(model_args.config_overrides )
logger.info(F'''New config: {config}''' )
# adapt config
config.update(
{
"mask_ratio": model_args.mask_ratio,
"norm_pix_loss": model_args.norm_pix_loss,
} )
# create image processor
if model_args.image_processor_name:
snake_case__ : Optional[int] = ViTImageProcessor.from_pretrained(model_args.image_processor_name , **snake_case_ )
elif model_args.model_name_or_path:
snake_case__ : List[Any] = ViTImageProcessor.from_pretrained(model_args.model_name_or_path , **snake_case_ )
else:
snake_case__ : Optional[int] = ViTImageProcessor()
# create model
if model_args.model_name_or_path:
snake_case__ : Optional[Any] = ViTMAEForPreTraining.from_pretrained(
model_args.model_name_or_path , from_tf=bool(".ckpt" in model_args.model_name_or_path ) , config=snake_case_ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("Training new model from scratch" )
snake_case__ : Tuple = ViTMAEForPreTraining(snake_case_ )
if training_args.do_train:
snake_case__ : Union[str, Any] = ds["train"].column_names
else:
snake_case__ : Any = ds["validation"].column_names
if data_args.image_column_name is not None:
snake_case__ : List[Any] = data_args.image_column_name
elif "image" in column_names:
snake_case__ : Tuple = "image"
elif "img" in column_names:
snake_case__ : List[str] = "img"
else:
snake_case__ : Any = column_names[0]
# transformations as done in original MAE paper
# source: https://github.com/facebookresearch/mae/blob/main/main_pretrain.py
if "shortest_edge" in image_processor.size:
snake_case__ : int = image_processor.size["shortest_edge"]
else:
snake_case__ : List[Any] = (image_processor.size["height"], image_processor.size["width"])
snake_case__ : Optional[Any] = Compose(
[
Lambda(lambda snake_case_ : img.convert("RGB" ) if img.mode != "RGB" else img ),
RandomResizedCrop(snake_case_ , scale=(0.2, 1.0) , interpolation=InterpolationMode.BICUBIC ),
RandomHorizontalFlip(),
ToTensor(),
Normalize(mean=image_processor.image_mean , std=image_processor.image_std ),
] )
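# Applies the train-time augmentations above to every image in a batch of examples.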
def preprocess_images(snake_case_ : Dict ):
snake_case__ : Optional[Any] = [transforms(snake_case_ ) for image in examples[image_column_name]]
return examples
if training_args.do_train:
if "train" not in ds:
raise ValueError("--do_train requires a train dataset" )
if data_args.max_train_samples is not None:
snake_case__ : Any = ds["train"].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
# Set the training transforms
ds["train"].set_transform(snake_case_ )
if training_args.do_eval:
if "validation" not in ds:
raise ValueError("--do_eval requires a validation dataset" )
if data_args.max_eval_samples is not None:
snake_case__ : Tuple = (
ds["validation"].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
ds["validation"].set_transform(snake_case_ )
# Compute absolute learning rate
snake_case__ : int = (
training_args.train_batch_size * training_args.gradient_accumulation_steps * training_args.world_size
)
if training_args.base_learning_rate is not None:
snake_case__ : Optional[Any] = training_args.base_learning_rate * total_train_batch_size / 256
# Initialize our trainer
snake_case__ : Optional[Any] = Trainer(
model=snake_case_ , args=snake_case_ , train_dataset=ds["train"] if training_args.do_train else None , eval_dataset=ds["validation"] if training_args.do_eval else None , tokenizer=snake_case_ , data_collator=snake_case_ , )
# Training
if training_args.do_train:
snake_case__ : Tuple = None
if training_args.resume_from_checkpoint is not None:
snake_case__ : Any = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
snake_case__ : Optional[int] = last_checkpoint
snake_case__ : List[Any] = trainer.train(resume_from_checkpoint=snake_case_ )
trainer.save_model()
trainer.log_metrics("train" , train_result.metrics )
trainer.save_metrics("train" , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
snake_case__ : int = trainer.evaluate()
trainer.log_metrics("eval" , snake_case_ )
trainer.save_metrics("eval" , snake_case_ )
# Write model card and (optionally) push to hub
snake_case__ : Union[str, Any] = {
"tasks": "masked-auto-encoding",
"dataset": data_args.dataset_name,
"tags": ["masked-auto-encoding"],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case_ )
else:
trainer.create_model_card(**snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
# For xla_spawn (TPUs)
main()
if __name__ == "__main__":
main()
| 25 |
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCamelCase : str = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1.
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large'. Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ):
snake_case__ : List[Any] = compute_mauve(
p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , )
return out
| 25 | 1 |
import warnings
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = ["image_processor", "tokenizer"]
a_ = "ViTImageProcessor"
a_ = ("CLIPTokenizer", "CLIPTokenizerFast")
def __init__( self : Tuple , __A : str=None , __A : Optional[int]=None , **__A : Tuple ):
snake_case__ : List[Any] = None
if "feature_extractor" in kwargs:
warnings.warn(
"The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
" instead." , __A , )
snake_case__ : Union[str, Any] = kwargs.pop("feature_extractor" )
snake_case__ : str = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("You need to specify an `image_processor`." )
if tokenizer is None:
raise ValueError("You need to specify a `tokenizer`." )
super().__init__(__A , __A )
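# Builds model inputs from any combination of text, visual prompt and images;
# prompt pixel values are returned under the "conditional_pixel_values" key.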
def __call__( self : Any , __A : Union[str, Any]=None , __A : Dict=None , __A : List[Any]=None , __A : List[Any]=None , **__A : Tuple ):
if text is None and visual_prompt is None and images is None:
raise ValueError("You have to specify either text, visual prompt or images." )
if text is not None and visual_prompt is not None:
raise ValueError("You have to specify exactly one type of prompt. Either text or visual prompt." )
if text is not None:
snake_case__ : str = self.tokenizer(__A , return_tensors=__A , **__A )
if visual_prompt is not None:
snake_case__ : List[str] = self.image_processor(__A , return_tensors=__A , **__A )
if images is not None:
snake_case__ : Optional[int] = self.image_processor(__A , return_tensors=__A , **__A )
if visual_prompt is not None and images is not None:
snake_case__ : Union[str, Any] = {
"pixel_values": image_features.pixel_values,
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
elif text is not None and images is not None:
snake_case__ : Any = image_features.pixel_values
return encoding
elif text is not None:
return encoding
elif visual_prompt is not None:
snake_case__ : Optional[Any] = {
"conditional_pixel_values": prompt_features.pixel_values,
}
return encoding
else:
return BatchEncoding(data=dict(**__A ) , tensor_type=__A )
def _lowercase ( self : Optional[int] , *__A : str , **__A : Tuple ):
return self.tokenizer.batch_decode(*__A , **__A )
def _lowercase ( self : List[str] , *__A : Optional[Any] , **__A : Union[str, Any] ):
return self.tokenizer.decode(*__A , **__A )
@property
def _lowercase ( self : Optional[Any] ):
warnings.warn(
"`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead." , __A , )
return self.image_processor_class
@property
def _lowercase ( self : str ):
warnings.warn(
"`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead." , __A , )
return self.image_processor
| 25 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCamelCase : List[Any] = concatenate_datasets
__lowerCamelCase : List[str] = DownloadConfig
__lowerCamelCase : Union[str, Any] = DownloadManager
__lowerCamelCase : str = DownloadMode
__lowerCamelCase : Union[str, Any] = DownloadConfig
__lowerCamelCase : List[str] = DownloadMode
__lowerCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 25 | 1 |
import tempfile
import torch
from diffusers import PNDMScheduler
from .test_schedulers import SchedulerCommonTest
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = (PNDMScheduler,)
a_ = (("num_inference_steps", 5_0),)
def _lowercase ( self : Dict , **__A : Tuple ):
snake_case__ : List[str] = {
"num_train_timesteps": 1_0_0_0,
"beta_start": 0.0_0_0_1,
"beta_end": 0.0_2,
"beta_schedule": "linear",
}
config.update(**__A )
return config
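# Round-trip test: a scheduler reloaded from a saved config must produce identical step_prk/step_plms outputs.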
def _lowercase ( self : Optional[Any] , __A : Tuple=0 , **__A : Dict ):
snake_case__ : Optional[int] = dict(self.forward_default_kwargs )
snake_case__ : List[Any] = kwargs.pop("num_inference_steps" , __A )
snake_case__ : Optional[int] = self.dummy_sample
snake_case__ : List[Any] = 0.1 * sample
snake_case__ : Dict = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
snake_case__ : str = self.get_scheduler_config(**__A )
snake_case__ : int = scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals
snake_case__ : Dict = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
snake_case__ : Tuple = scheduler_class.from_pretrained(__A )
new_scheduler.set_timesteps(__A )
# copy over dummy past residuals
snake_case__ : List[str] = dummy_past_residuals[:]
snake_case__ : int = scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
snake_case__ : List[Any] = new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case__ : Tuple = scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
snake_case__ : int = new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : Any , __A : Dict=0 , **__A : Union[str, Any] ):
snake_case__ : Optional[Any] = dict(self.forward_default_kwargs )
snake_case__ : Optional[int] = kwargs.pop("num_inference_steps" , __A )
snake_case__ : List[Any] = self.dummy_sample
snake_case__ : List[Any] = 0.1 * sample
snake_case__ : List[str] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
for scheduler_class in self.scheduler_classes:
snake_case__ : List[str] = self.get_scheduler_config()
snake_case__ : Union[str, Any] = scheduler_class(**__A )
scheduler.set_timesteps(__A )
# copy over dummy past residuals (must be after setting timesteps)
snake_case__ : Tuple = dummy_past_residuals[:]
with tempfile.TemporaryDirectory() as tmpdirname:
scheduler.save_config(__A )
snake_case__ : Tuple = scheduler_class.from_pretrained(__A )
# copy over dummy past residuals
new_scheduler.set_timesteps(__A )
# copy over dummy past residual (must be after setting timesteps)
snake_case__ : Optional[Any] = dummy_past_residuals[:]
snake_case__ : List[str] = scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
snake_case__ : List[str] = new_scheduler.step_prk(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
snake_case__ : Tuple = scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
snake_case__ : Union[str, Any] = new_scheduler.step_plms(__A , __A , __A , **__A ).prev_sample
assert torch.sum(torch.abs(output - new_output ) ) < 1e-5, "Scheduler outputs are not identical"
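# Helper: run a full PRK + PLMS sampling loop with a dummy model and return the final sample.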
def _lowercase ( self : List[str] , **__A : int ):
snake_case__ : Any = self.scheduler_classes[0]
snake_case__ : List[str] = self.get_scheduler_config(**__A )
snake_case__ : List[Any] = scheduler_class(**__A )
snake_case__ : Optional[int] = 1_0
snake_case__ : Dict = self.dummy_model()
snake_case__ : Any = self.dummy_sample_deter
scheduler.set_timesteps(__A )
for i, t in enumerate(scheduler.prk_timesteps ):
snake_case__ : List[str] = model(__A , __A )
snake_case__ : Union[str, Any] = scheduler.step_prk(__A , __A , __A ).prev_sample
for i, t in enumerate(scheduler.plms_timesteps ):
snake_case__ : List[Any] = model(__A , __A )
snake_case__ : List[Any] = scheduler.step_plms(__A , __A , __A ).prev_sample
return sample
def _lowercase ( self : Any ):
snake_case__ : Tuple = dict(self.forward_default_kwargs )
snake_case__ : List[Any] = kwargs.pop("num_inference_steps" , __A )
for scheduler_class in self.scheduler_classes:
snake_case__ : int = self.get_scheduler_config()
snake_case__ : Any = scheduler_class(**__A )
snake_case__ : Optional[Any] = self.dummy_sample
snake_case__ : Optional[int] = 0.1 * sample
if num_inference_steps is not None and hasattr(__A , "set_timesteps" ):
scheduler.set_timesteps(__A )
elif num_inference_steps is not None and not hasattr(__A , "set_timesteps" ):
snake_case__ : Tuple = num_inference_steps
# copy over dummy past residuals (must be done after set_timesteps)
snake_case__ : Optional[int] = [residual + 0.2, residual + 0.1_5, residual + 0.1, residual + 0.0_5]
snake_case__ : Optional[Any] = dummy_past_residuals[:]
snake_case__ : Any = scheduler.step_prk(__A , 0 , __A , **__A ).prev_sample
snake_case__ : Dict = scheduler.step_prk(__A , 1 , __A , **__A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
snake_case__ : Dict = scheduler.step_plms(__A , 0 , __A , **__A ).prev_sample
snake_case__ : Optional[Any] = scheduler.step_plms(__A , 1 , __A , **__A ).prev_sample
self.assertEqual(output_a.shape , sample.shape )
self.assertEqual(output_a.shape , output_a.shape )
def _lowercase ( self : Optional[Any] ):
for timesteps in [1_0_0, 1_0_0_0]:
self.check_over_configs(num_train_timesteps=__A )
def _lowercase ( self : Any ):
for steps_offset in [0, 1]:
self.check_over_configs(steps_offset=__A )
snake_case__ : Optional[Any] = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config(steps_offset=1 )
snake_case__ : str = scheduler_class(**__A )
scheduler.set_timesteps(1_0 )
assert torch.equal(
scheduler.timesteps , torch.LongTensor(
[9_0_1, 8_5_1, 8_5_1, 8_0_1, 8_0_1, 7_5_1, 7_5_1, 7_0_1, 7_0_1, 6_5_1, 6_5_1, 6_0_1, 6_0_1, 5_0_1, 4_0_1, 3_0_1, 2_0_1, 1_0_1, 1] ) , )
def _lowercase ( self : Tuple ):
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1] , [0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__A , beta_end=__A )
def _lowercase ( self : Union[str, Any] ):
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__A )
def _lowercase ( self : Any ):
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__A )
def _lowercase ( self : int ):
for t in [1, 5, 1_0]:
self.check_over_forward(time_step=__A )
def _lowercase ( self : Dict ):
for t, num_inference_steps in zip([1, 5, 1_0] , [1_0, 5_0, 1_0_0] ):
self.check_over_forward(num_inference_steps=__A )
def _lowercase ( self : List[str] ):
# an earlier version of set_timesteps() caused an error when indexing alphas with a number of inference steps that is a power of 3
snake_case__ : List[Any] = 2_7
for scheduler_class in self.scheduler_classes:
snake_case__ : List[Any] = self.dummy_sample
snake_case__ : Union[str, Any] = 0.1 * sample
snake_case__ : Dict = self.get_scheduler_config()
snake_case__ : Dict = scheduler_class(**__A )
scheduler.set_timesteps(__A )
# before the power-of-3 fix this would error on the first step, so we only need to run two
for i, t in enumerate(scheduler.prk_timesteps[:2] ):
snake_case__ : Optional[int] = scheduler.step_prk(__A , __A , __A ).prev_sample
def _lowercase ( self : Union[str, Any] ):
with self.assertRaises(__A ):
snake_case__ : Any = self.scheduler_classes[0]
snake_case__ : Optional[int] = self.get_scheduler_config()
snake_case__ : List[str] = scheduler_class(**__A )
scheduler.step_plms(self.dummy_sample , 1 , self.dummy_sample ).prev_sample
def _lowercase ( self : Optional[Any] ):
snake_case__ : int = self.full_loop()
snake_case__ : Optional[Any] = torch.sum(torch.abs(__A ) )
snake_case__ : Any = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 1_9_8.1_3_1_8 ) < 1e-2
assert abs(result_mean.item() - 0.2_5_8_0 ) < 1e-3
def _lowercase ( self : Any ):
snake_case__ : Dict = self.full_loop(prediction_type="v_prediction" )
snake_case__ : List[Any] = torch.sum(torch.abs(__A ) )
snake_case__ : Optional[Any] = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 6_7.3_9_8_6 ) < 1e-2
assert abs(result_mean.item() - 0.0_8_7_8 ) < 1e-3
def _lowercase ( self : Dict ):
# We specify different beta, so that the first alpha is 0.99
snake_case__ : List[str] = self.full_loop(set_alpha_to_one=__A , beta_start=0.0_1 )
snake_case__ : Dict = torch.sum(torch.abs(__A ) )
snake_case__ : str = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 2_3_0.0_3_9_9 ) < 1e-2
assert abs(result_mean.item() - 0.2_9_9_5 ) < 1e-3
def _lowercase ( self : Tuple ):
# We specify different beta, so that the first alpha is 0.99
snake_case__ : Union[str, Any] = self.full_loop(set_alpha_to_one=__A , beta_start=0.0_1 )
snake_case__ : int = torch.sum(torch.abs(__A ) )
snake_case__ : Dict = torch.mean(torch.abs(__A ) )
assert abs(result_sum.item() - 1_8_6.9_4_8_2 ) < 1e-2
assert abs(result_mean.item() - 0.2_4_3_4 ) < 1e-3
| 25 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
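# Sieve of Eratosthenes: flag multiples of every odd i >= 3 as composite, then collect the surviving odd primes (plus 2).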
snake_case__ : str = [True] * limit
snake_case__ : str = False
snake_case__ : str = False
snake_case__ : str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
snake_case__ : Optional[Any] = i * 2
while index < limit:
snake_case__ : Union[str, Any] = False
snake_case__ : Any = index + i
snake_case__ : Optional[Any] = [2]
for i in range(3 , snake_case_ , 2 ):
if is_prime[i]:
primes.append(snake_case_ )
return primes
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ):
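# Project Euler 50: find the prime below the ceiling that can be written as the sum of the most consecutive primes.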
snake_case__ : Optional[int] = prime_sieve(snake_case_ )
snake_case__ : List[Any] = 0
snake_case__ : List[str] = 0
for i in range(len(snake_case_ ) ):
for j in range(i + length , len(snake_case_ ) ):
snake_case__ : Dict = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
snake_case__ : Tuple = j - i
snake_case__ : str = sol
return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 25 | 1 |
import math
def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : int = 0 , snake_case_ : int = 0 ):
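# Insertion sort on array[start:end], used for small partitions inside introsort.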
snake_case__ : List[Any] = end or len(snake_case_ )
for i in range(snake_case_ , snake_case_ ):
snake_case__ : Union[str, Any] = i
snake_case__ : Optional[int] = array[i]
while temp_index != start and temp_index_value < array[temp_index - 1]:
snake_case__ : Optional[int] = array[temp_index - 1]
temp_index -= 1
snake_case__ : Any = temp_index_value
return array
def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : int , snake_case_ : int ): # Max Heap
snake_case__ : str = index
snake_case__ : List[Any] = 2 * index + 1 # Left Node
snake_case__ : Optional[Any] = 2 * index + 2 # Right Node
if left_index < heap_size and array[largest] < array[left_index]:
snake_case__ : int = left_index
if right_index < heap_size and array[largest] < array[right_index]:
snake_case__ : List[Any] = right_index
if largest != index:
snake_case__, snake_case__ : Optional[Any] = array[largest], array[index]
heapify(snake_case_ , snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : list ):
snake_case__ : Any = len(snake_case_ )
for i in range(n // 2 , -1 , -1 ):
heapify(snake_case_ , snake_case_ , snake_case_ )
for i in range(n - 1 , 0 , -1 ):
snake_case__, snake_case__ : Dict = array[0], array[i]
heapify(snake_case_ , 0 , snake_case_ )
return array
def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
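# Median-of-three pivot selection to avoid quicksort's worst-case splits.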
if (array[first_index] > array[middle_index]) != (
array[first_index] > array[last_index]
):
return array[first_index]
elif (array[middle_index] > array[first_index]) != (
array[middle_index] > array[last_index]
):
return array[middle_index]
else:
return array[last_index]
def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
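# Hoare-style partition of array[low:high] around the given pivot value.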
snake_case__ : Optional[Any] = low
snake_case__ : Optional[int] = high
while True:
while array[i] < pivot:
i += 1
j -= 1
while pivot < array[j]:
j -= 1
if i >= j:
return i
snake_case__, snake_case__ : Union[str, Any] = array[j], array[i]
i += 1
def SCREAMING_SNAKE_CASE ( snake_case_ : list ):
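# Public entry point: introsort with max depth 2 * ceil(log2(n)) and an insertion-sort threshold of 16.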
if len(snake_case_ ) == 0:
return array
snake_case__ : Dict = 2 * math.ceil(math.loga(len(snake_case_ ) ) )
snake_case__ : Tuple = 16
return intro_sort(snake_case_ , 0 , len(snake_case_ ) , snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : int , snake_case_ : int , snake_case_ : int , snake_case_ : int ):
while end - start > size_threshold:
if max_depth == 0:
return heap_sort(snake_case_ )
max_depth -= 1
snake_case__ : Tuple = median_of_a(snake_case_ , snake_case_ , start + ((end - start) // 2) + 1 , end - 1 )
snake_case__ : int = partition(snake_case_ , snake_case_ , snake_case_ , snake_case_ )
intro_sort(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
snake_case__ : List[Any] = p
return insertion_sort(snake_case_ , snake_case_ , snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod()
__lowerCamelCase : Optional[Any] = input("""Enter numbers separated by a comma : """).strip()
__lowerCamelCase : int = [float(item) for item in user_input.split(""",""")]
print(sort(unsorted))
| 25 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
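# Same shortest-edge resize logic as the processor: computes the expected height/width,
# maxed per dimension over the batch when batched=True.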
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : int = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
        expected_height, expected_width = self.image_processor_tester.get_expected_values(image_inputs, batched=True)
        self.assertEqual(
            encoded_images.shape,
            (
                self.image_processor_tester.batch_size,
                self.image_processor_tester.num_channels,
                expected_height,
                expected_width,
            ),
        )

    @slow
    def test_call_pytorch_with_coco_detection_annotations(self):
        # prepare image and target
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"image_id": 39769, "annotations": target}

        # encode them
        image_processing = DeformableDetrImageProcessor()
        encoding = image_processing(images=image, annotations=target, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([75, 75, 63, 65, 17, 17])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))

    @slow
    def test_call_pytorch_with_coco_panoptic_annotations(self):
        # prepare image, target and masks_path
        image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
        with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt", "r") as f:
            target = json.loads(f.read())
        target = {"file_name": "000000039769.png", "image_id": 39769, "segments_info": target}
        masks_path = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic")

        # encode them
        image_processing = DeformableDetrImageProcessor(format="coco_panoptic")
        encoding = image_processing(images=image, annotations=target, masks_path=masks_path, return_tensors="pt")

        # verify pixel values
        expected_shape = torch.Size([1, 3, 800, 1066])
        self.assertEqual(encoding["pixel_values"].shape, expected_shape)
        expected_slice = torch.tensor([0.2796, 0.3138, 0.3481])
        self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3], expected_slice, atol=1e-4))
        # verify area
        expected_area = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147])
        self.assertTrue(torch.allclose(encoding["labels"][0]["area"], expected_area))
        # verify boxes
        expected_boxes_shape = torch.Size([6, 4])
        self.assertEqual(encoding["labels"][0]["boxes"].shape, expected_boxes_shape)
        expected_boxes_slice = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625])
        self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0], expected_boxes_slice, atol=1e-3))
        # verify image_id
        expected_image_id = torch.tensor([39769])
        self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"], expected_image_id))
        # verify is_crowd
        expected_is_crowd = torch.tensor([0, 0, 0, 0, 0, 0])
        self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"], expected_is_crowd))
        # verify class_labels
        expected_class_labels = torch.tensor([17, 17, 63, 75, 75, 93])
        self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"], expected_class_labels))
        # verify masks
        expected_masks_sum = 822873
        self.assertEqual(encoding["labels"][0]["masks"].sum().item(), expected_masks_sum)
        # verify orig_size
        expected_orig_size = torch.tensor([480, 640])
        self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"], expected_orig_size))
        # verify size
        expected_size = torch.tensor([800, 1066])
        self.assertTrue(torch.allclose(encoding["labels"][0]["size"], expected_size))
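
# A minimal sketch (added for illustration, assuming plain COCO conventions): the
# detection test above feeds the processor `{"image_id": ..., "annotations": [...]}`,
# where each annotation is a standard COCO dict. The values below are placeholders,
# not fixture data.
def _example_coco_detection_target(image_id: int = 39769) -> dict:
    annotation = {
        "image_id": image_id,
        "category_id": 17,  # COCO category id (17 == "cat")
        "iscrowd": 0,
        "area": 100.0,
        "bbox": [10.0, 20.0, 10.0, 10.0],  # absolute-pixel [x, y, width, height]
    }
    return {"image_id": image_id, "annotations": [annotation]}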
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
_import_structure = {"configuration_glpn": ["GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP", "GLPNConfig"]}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_glpn"] = ["GLPNFeatureExtractor"]
    _import_structure["image_processing_glpn"] = ["GLPNImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_glpn"] = [
"""GLPN_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""GLPNForDepthEstimation""",
"""GLPNLayer""",
"""GLPNModel""",
"""GLPNPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_glpn import GLPN_PRETRAINED_CONFIG_ARCHIVE_MAP, GLPNConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_glpn import GLPNFeatureExtractor
from .image_processing_glpn import GLPNImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_glpn import (
GLPN_PRETRAINED_MODEL_ARCHIVE_LIST,
GLPNForDepthEstimation,
GLPNLayer,
GLPNModel,
GLPNPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
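# Minimal usage sketch (illustrative, assuming the standard transformers layout): with
# the `_LazyModule` swap above, heavy submodules are imported only on first attribute
# access, e.g.
#
#     from transformers import GLPNConfig              # cheap, config only
#     from transformers import GLPNForDepthEstimation  # triggers the torch-backed import
#
# while the eager imports under `TYPE_CHECKING` keep IDEs and type checkers accurate.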
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {"vocab_file": "vocab.json", "merges_file": "merges.txt"}

# See all LED models at https://huggingface.co/models?filter=LED
PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json",
    },
    "merges_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt",
    },
    "tokenizer_file": {
        "allenai/led-base-16384": "https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json",
    },
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    "allenai/led-base-16384": 16384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode():
    bs = (
        list(range(ord("!"), ord("~") + 1)) + list(range(ord("¡"), ord("¬") + 1)) + list(range(ord("®"), ord("ÿ") + 1))
    )
    cs = bs[:]
    n = 0
    for b in range(2**8):
        if b not in bs:
            bs.append(b)
            cs.append(2**8 + n)
            n += 1
    cs = [chr(n) for n in cs]
    return dict(zip(bs, cs))


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs
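
# A quick sketch (added for illustration): `bytes_to_unicode` gives every byte value a
# printable unicode stand-in, and `get_pairs` lists the adjacent symbol pairs that BPE
# considers merging.
def _demo_byte_level_bpe_helpers() -> None:
    byte_encoder = bytes_to_unicode()
    assert len(byte_encoder) == 256  # every byte value 0-255 gets a printable stand-in
    assert byte_encoder[ord("A")] == "A"  # printable ASCII maps to itself
    assert get_pairs(tuple("hello")) == {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}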
class LEDTokenizer(PreTrainedTokenizer):
    """
    Constructs a LED tokenizer, using the same byte-level Byte-Pair-Encoding scheme as BART/RoBERTa.
    """

    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]
    def __init__(
        self,
        vocab_file,
        merges_file,
        errors="replace",
        bos_token="<s>",
        eos_token="</s>",
        sep_token="</s>",
        cls_token="<s>",
        unk_token="<unk>",
        pad_token="<pad>",
        mask_token="<mask>",
        add_prefix_space=False,
        **kwargs,
    ):
        bos_token = AddedToken(bos_token, lstrip=False, rstrip=False) if isinstance(bos_token, str) else bos_token
        eos_token = AddedToken(eos_token, lstrip=False, rstrip=False) if isinstance(eos_token, str) else eos_token
        sep_token = AddedToken(sep_token, lstrip=False, rstrip=False) if isinstance(sep_token, str) else sep_token
        cls_token = AddedToken(cls_token, lstrip=False, rstrip=False) if isinstance(cls_token, str) else cls_token
        unk_token = AddedToken(unk_token, lstrip=False, rstrip=False) if isinstance(unk_token, str) else unk_token
        pad_token = AddedToken(pad_token, lstrip=False, rstrip=False) if isinstance(pad_token, str) else pad_token
        # Mask token behave like a normal word, i.e. include the space before it
        mask_token = AddedToken(mask_token, lstrip=True, rstrip=False) if isinstance(mask_token, str) else mask_token
        super().__init__(
            errors=errors,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            cls_token=cls_token,
            pad_token=pad_token,
            mask_token=mask_token,
            add_prefix_space=add_prefix_space,
            **kwargs,
        )
        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        self.errors = errors  # how to handle errors in decoding
        self.byte_encoder = bytes_to_unicode()
        self.byte_decoder = {v: k for k, v in self.byte_encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            bpe_merges = merges_handle.read().split("\n")[1:-1]
        bpe_merges = [tuple(merge.split()) for merge in bpe_merges]
        self.bpe_ranks = dict(zip(bpe_merges, range(len(bpe_merges))))
        self.cache = {}
        self.add_prefix_space = add_prefix_space
        # Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
        self.pat = re.compile(r"""'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+""")
    @property
    # Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)
    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        self.cache[token] = word
        return word
    def _tokenize(self, text):
        bpe_tokens = []
        for token in re.findall(self.pat, text):
            token = "".join(
                self.byte_encoder[b] for b in token.encode("utf-8")
            )  # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
            bpe_tokens.extend(bpe_token for bpe_token in self.bpe(token).split(" "))
        return bpe_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index)

    def convert_tokens_to_string(self, tokens):
        text = "".join(tokens)
        text = bytearray([self.byte_decoder[c] for c in text]).decode("utf-8", errors=self.errors)
        return text
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return vocab_file, merge_file
    def build_inputs_with_special_tokens(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        if token_ids_1 is None:
            return [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        cls = [self.cls_token_id]
        sep = [self.sep_token_id]
        return cls + token_ids_0 + sep + sep + token_ids_1 + sep

    def get_special_tokens_mask(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False
    ):
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True
            )
        if token_ids_1 is None:
            return [1] + ([0] * len(token_ids_0)) + [1]
        return [1] + ([0] * len(token_ids_0)) + [1, 1] + ([0] * len(token_ids_1)) + [1]

    def create_token_type_ids_from_sequences(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None):
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep + sep + token_ids_1 + sep) * [0]
    def prepare_for_tokenization(self, text, is_split_into_words=False, **kwargs):
        add_prefix_space = kwargs.pop("add_prefix_space", self.add_prefix_space)
        if (is_split_into_words or add_prefix_space) and (len(text) > 0 and not text[0].isspace()):
            text = " " + text
        return (text, kwargs)

    def _pad(
        self,
        encoded_inputs: Union[Dict[str, EncodedInput], BatchEncoding],
        max_length: Optional[int] = None,
        padding_strategy: PaddingStrategy = PaddingStrategy.DO_NOT_PAD,
        pad_to_multiple_of: Optional[int] = None,
        return_attention_mask: Optional[bool] = None,
    ) -> dict:
        encoded_inputs = super()._pad(
            encoded_inputs=encoded_inputs,
            max_length=max_length,
            padding_strategy=padding_strategy,
            pad_to_multiple_of=pad_to_multiple_of,
            return_attention_mask=return_attention_mask,
        )

        # Load from model defaults
        if return_attention_mask is None:
            return_attention_mask = "attention_mask" in self.model_input_names

        if return_attention_mask and "global_attention_mask" in encoded_inputs:
            required_input = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` need to have the same length as other (sequential) inputs.
            needs_to_be_padded = len(encoded_inputs["global_attention_mask"]) != len(required_input)

            if needs_to_be_padded:
                difference = len(required_input) - len(encoded_inputs["global_attention_mask"])

                if self.padding_side == "right":
                    # Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
                    encoded_inputs["global_attention_mask"] = (
                        encoded_inputs["global_attention_mask"] + [-1] * difference
                    )
                elif self.padding_side == "left":
                    encoded_inputs["global_attention_mask"] = [-1] * difference + encoded_inputs[
                        "global_attention_mask"
                    ]
                else:
                    raise ValueError("Invalid padding strategy:" + str(self.padding_side))

        return encoded_inputs
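
# A hand-rolled sketch (added for illustration, not a supported entry point): what the
# `_pad` override above does to `global_attention_mask` when right-padding a 3-token
# sequence to length 5. It pads with -1 ("local attention"), not with 0.
def _demo_global_attention_padding() -> None:
    encoded = {"input_ids": [0, 713, 2, 1, 1], "attention_mask": [1, 1, 1, 0, 0]}
    global_attention_mask = [1, 0, 0]
    difference = len(encoded["input_ids"]) - len(global_attention_mask)
    encoded["global_attention_mask"] = global_attention_mask + [-1] * difference
    assert encoded["global_attention_mask"] == [1, 0, 0, -1, -1]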
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(dirname(__file__)), "src"))
sys.path.insert(1, git_repo_path)

# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="ignore", category=FutureWarning)


def pytest_addoption(parser):
    from diffusers.utils.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    from diffusers.utils.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)
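# Usage note (illustrative, not project policy): with the hooks above in place, test
# reports can be produced with an invocation along the lines of
#
#     python -m pytest tests -k "ddim" --make-reports=tests_ddim
#
# where report naming and output location are handled by
# `pytest_terminal_summary_main` in diffusers' testing utilities.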
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)


def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("No input value was provided")
    negative = "-" if number.startswith("-") else ""
    number = number.lstrip("-")
    if not number.isnumeric():
        raise ValueError("Input value is not an integer")
    return f"{negative}0b{binary_recursive(int(number))}"
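
# Worked example (added for illustration): 11 = 2*5 + 1, 5 = 2*2 + 1, 2 = 2*1 + 0,
# so the recursion emits "1011"; `main` adds the sign and the "0b" prefix.
def _demo_binary_recursive() -> None:
    assert binary_recursive(11) == "1011"
    assert main("-11") == "-0b1011"
    assert main(" 0 ") == "0b0"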
if __name__ == "__main__":
from doctest import testmod
testmod()
def prefix_function(input_string: str) -> list:
    prefix_result = [0] * len(input_string)
    for i in range(1, len(input_string)):
        # use last results for better performance - dynamic programming
        j = prefix_result[i - 1]
        while j > 0 and input_string[i] != input_string[j]:
            j = prefix_result[j - 1]
        if input_string[i] == input_string[j]:
            j += 1
        prefix_result[i] = j
    return prefix_result


def longest_prefix(input_string: str) -> int:
    return max(prefix_function(input_string))
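
# Illustration (added): each prefix-function entry is the length of the longest proper
# prefix that is also a suffix of the string ending at that position.
def _demo_prefix_function() -> None:
    assert prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4]
    assert longest_prefix("aabcdaabc") == 4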
if __name__ == "__main__":
import doctest
doctest.testmod()
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : int , __A : List[Any]=1_3 , __A : str=7 , __A : Union[str, Any]=True , __A : Tuple=True , __A : Tuple=True , __A : Optional[Any]=True , __A : List[Any]=9_9 , __A : List[str]=[1, 1, 2] , __A : Optional[Any]=1 , __A : int=3_2 , __A : Optional[Any]=4 , __A : Optional[Any]=8 , __A : List[str]=3_7 , __A : Optional[int]="gelu_new" , __A : str=0.1 , __A : Union[str, Any]=0.1 , __A : Union[str, Any]=0.0 , __A : List[Any]=5_1_2 , __A : List[Any]=3 , __A : Optional[Any]=0.0_2 , __A : Optional[int]=3 , __A : List[str]=4 , __A : int=None , __A : Tuple=False , ):
snake_case__ : List[Any] = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Dict = seq_length
snake_case__ : Tuple = is_training
snake_case__ : Dict = use_input_mask
snake_case__ : Union[str, Any] = use_token_type_ids
snake_case__ : Any = use_labels
snake_case__ : Union[str, Any] = vocab_size
snake_case__ : Any = block_sizes
snake_case__ : Optional[Any] = num_decoder_layers
snake_case__ : Dict = d_model
snake_case__ : Dict = n_head
snake_case__ : Union[str, Any] = d_head
snake_case__ : Dict = d_inner
snake_case__ : List[str] = hidden_act
snake_case__ : int = hidden_dropout
snake_case__ : Any = attention_dropout
snake_case__ : List[Any] = activation_dropout
snake_case__ : int = max_position_embeddings
snake_case__ : Any = type_vocab_size
snake_case__ : List[str] = 2
snake_case__ : Union[str, Any] = num_labels
snake_case__ : Dict = num_choices
snake_case__ : Optional[int] = scope
snake_case__ : int = initializer_std
# Used in the tests to check the size of the first attention layer
snake_case__ : Optional[Any] = n_head
# Used in the tests to check the size of the first hidden state
snake_case__ : Union[str, Any] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
snake_case__ : Optional[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
snake_case__ : int = self.num_hidden_layers + 2
def _lowercase ( self : List[str] ):
snake_case__ : int = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Optional[int] = None
if self.use_input_mask:
snake_case__ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : List[str] = None
if self.use_token_type_ids:
snake_case__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : List[Any] = None
snake_case__ : str = None
snake_case__ : Tuple = None
if self.use_labels:
snake_case__ : Tuple = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Dict = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : Optional[int] = FunnelConfig(
vocab_size=self.vocab_size , block_sizes=self.block_sizes , num_decoder_layers=self.num_decoder_layers , d_model=self.d_model , n_head=self.n_head , d_head=self.d_head , d_inner=self.d_inner , hidden_act=self.hidden_act , hidden_dropout=self.hidden_dropout , attention_dropout=self.attention_dropout , activation_dropout=self.activation_dropout , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_std=self.initializer_std , )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _lowercase ( self : Optional[Any] , __A : Optional[int] , __A : Any , __A : Tuple , __A : int , __A : Dict , __A : Union[str, Any] , __A : int , ):
snake_case__ : int = TFFunnelModel(config=__A )
snake_case__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ : Tuple = model(__A )
snake_case__ : List[str] = [input_ids, input_mask]
snake_case__ : str = model(__A )
snake_case__ : Optional[int] = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case__ : Optional[int] = False
snake_case__ : Optional[int] = TFFunnelModel(config=__A )
snake_case__ : Dict = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
snake_case__ : Any = False
snake_case__ : Any = TFFunnelModel(config=__A )
snake_case__ : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.d_model) )
def _lowercase ( self : Optional[int] , __A : Dict , __A : List[str] , __A : List[Any] , __A : Optional[int] , __A : Union[str, Any] , __A : int , __A : List[Any] , ):
snake_case__ : Any = TFFunnelBaseModel(config=__A )
snake_case__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ : Optional[int] = model(__A )
snake_case__ : Optional[Any] = [input_ids, input_mask]
snake_case__ : List[str] = model(__A )
snake_case__ : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
snake_case__ : List[Any] = False
snake_case__ : Dict = TFFunnelBaseModel(config=__A )
snake_case__ : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 3, self.d_model) )
snake_case__ : str = False
snake_case__ : Union[str, Any] = TFFunnelBaseModel(config=__A )
snake_case__ : int = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, 2, self.d_model) )
def _lowercase ( self : List[Any] , __A : List[str] , __A : int , __A : Optional[int] , __A : Dict , __A : Any , __A : Optional[Any] , __A : Dict , ):
snake_case__ : List[Any] = TFFunnelForPreTraining(config=__A )
snake_case__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ : str = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : str , __A : Optional[Any] , __A : Optional[int] , __A : Optional[Any] , __A : Optional[int] , __A : Optional[int] , __A : Dict , __A : Optional[Any] , ):
snake_case__ : Union[str, Any] = TFFunnelForMaskedLM(config=__A )
snake_case__ : str = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ : Any = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : List[str] , __A : List[str] , __A : Union[str, Any] , __A : Optional[Any] , __A : List[Any] , __A : Union[str, Any] , __A : str , __A : Tuple , ):
snake_case__ : Optional[Any] = self.num_labels
snake_case__ : str = TFFunnelForSequenceClassification(config=__A )
snake_case__ : Tuple = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ : Optional[Any] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Any , __A : Optional[Any] , __A : List[Any] , __A : Any , __A : Any , __A : Any , __A : Dict , __A : Optional[Any] , ):
snake_case__ : Optional[int] = self.num_choices
snake_case__ : Union[str, Any] = TFFunnelForMultipleChoice(config=__A )
snake_case__ : List[str] = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
snake_case__ : str = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
snake_case__ : int = tf.tile(tf.expand_dims(__A , 1 ) , (1, self.num_choices, 1) )
snake_case__ : Optional[int] = {
"input_ids": multiple_choice_inputs_ids,
"attention_mask": multiple_choice_input_mask,
"token_type_ids": multiple_choice_token_type_ids,
}
snake_case__ : Optional[int] = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Optional[int] , __A : int , __A : Optional[int] , __A : Dict , __A : str , __A : List[Any] , __A : Optional[int] , __A : List[str] , ):
snake_case__ : List[str] = self.num_labels
snake_case__ : str = TFFunnelForTokenClassification(config=__A )
snake_case__ : int = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ : Any = model(__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : Optional[int] , __A : Tuple , __A : List[Any] , __A : Union[str, Any] , __A : Dict , __A : Dict , __A : Dict , __A : Dict , ):
snake_case__ : int = TFFunnelForQuestionAnswering(config=__A )
snake_case__ : List[Any] = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
snake_case__ : int = model(__A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
a_ = (
{
"feature-extraction": (TFFunnelBaseModel, TFFunnelModel),
"fill-mask": TFFunnelForMaskedLM,
"question-answering": TFFunnelForQuestionAnswering,
"text-classification": TFFunnelForSequenceClassification,
"token-classification": TFFunnelForTokenClassification,
"zero-shot": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def _lowercase ( self : Dict ):
snake_case__ : Dict = TFFunnelModelTester(self )
snake_case__ : List[Any] = ConfigTester(self , config_class=__A )
def _lowercase ( self : Union[str, Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : List[str] ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def _lowercase ( self : int ):
snake_case__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def _lowercase ( self : Tuple ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*__A )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
@require_tf
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
a_ = False
a_ = False
def _lowercase ( self : Any ):
snake_case__ : Any = TFFunnelModelTester(self , base=__A )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : List[str] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Optional[Any] ):
snake_case__ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*__A )
def _lowercase ( self : List[Any] ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _lowercase ( self : Any ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
        import jax
        from jaxlib.xla_client import Device

        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys()):
            logger.warning(
                f"Device with string identifier {self.device} not listed among the available "
                f"devices: {list(DEVICE_MAPPING.keys())}, so falling back to the default "
                f"device: {str(jax.devices()[0])}."
            )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() -> dict:
        import jax

        return {str(device): device for device in jax.devices()}

    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}

        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantied from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)

    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
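# Usage sketch (illustrative): this formatter is what backs `Dataset.with_format("jax")`
# in `datasets`, e.g.
#
#     from datasets import Dataset
#     ds = Dataset.from_dict({"x": [[1.0, 2.0], [3.0, 4.0]]}).with_format("jax")
#     row = ds[0]    # {"x": jax.Array of shape (2,)}
#     col = ds["x"]  # per-row arrays consolidated into one stacked jax.Array
#
# Exact shapes and dtypes depend on the feature types; this is illustrative only.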
def fibonacci(n: int) -> int:
    if n == 1 or not isinstance(n, int):
        return 0
    elif n == 2:
        return 1
    else:
        sequence = [0, 1]
        for i in range(2, n + 1):
            sequence.append(sequence[i - 1] + sequence[i - 2])
        return sequence[n]


def fibonacci_digits_index(n: int) -> int:
    digits = 0
    index = 2
    while digits < n:
        index += 1
        digits = len(str(fibonacci(index)))
    return index


def solution(n: int = 1000) -> int:
    return fibonacci_digits_index(n)
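
# Worked example (added for illustration): with this module's indexing the sequence
# runs 0, 1, 1, 2, ..., so the first Fibonacci number with 3 digits is F(12) = 144.
def _demo_fibonacci_digits() -> None:
    assert fibonacci(12) == 144
    assert fibonacci_digits_index(3) == 12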
if __name__ == "__main__":
print(solution(int(str(input()).strip())))
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_roberta_prelayernorm"] = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_roberta_prelayernorm"] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_roberta_prelayernorm"] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
from dataclasses import dataclass
from typing import Dict, Optional, Union
import torch
import torch.nn.functional as F
from torch import nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .attention import BasicTransformerBlock
from .attention_processor import AttentionProcessor, AttnProcessor
from .embeddings import TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
@dataclass
class PriorTransformerOutput(BaseOutput):
    """
    The output of the prior transformer: the predicted CLIP image embedding.
    """

    predicted_image_embedding: torch.FloatTensor
class PriorTransformer(ModelMixin, ConfigMixin):
    """A prior transformer model that predicts CLIP image embeddings from text embeddings."""
@register_to_config
def __init__( self : int , __A : int = 3_2 , __A : int = 6_4 , __A : int = 2_0 , __A : int = 7_6_8 , __A : Dict=7_7 , __A : Any=4 , __A : float = 0.0 , __A : str = "silu" , __A : Optional[str] = None , __A : Optional[str] = None , __A : Optional[str] = "linear" , __A : Optional[str] = "prd" , __A : Optional[int] = None , __A : Optional[int] = None , __A : Optional[int] = None , ):
super().__init__()
snake_case__ : Dict = num_attention_heads
snake_case__ : Any = attention_head_dim
snake_case__ : Union[str, Any] = num_attention_heads * attention_head_dim
snake_case__ : Optional[Any] = additional_embeddings
snake_case__ : Tuple = time_embed_dim or inner_dim
snake_case__ : Optional[int] = embedding_proj_dim or embedding_dim
snake_case__ : Dict = clip_embed_dim or embedding_dim
snake_case__ : List[Any] = Timesteps(__A , __A , 0 )
snake_case__ : int = TimestepEmbedding(__A , __A , out_dim=__A , act_fn=__A )
snake_case__ : List[str] = nn.Linear(__A , __A )
if embedding_proj_norm_type is None:
snake_case__ : int = None
elif embedding_proj_norm_type == "layer":
snake_case__ : int = nn.LayerNorm(__A )
else:
raise ValueError(f'''unsupported embedding_proj_norm_type: {embedding_proj_norm_type}''' )
snake_case__ : List[str] = nn.Linear(__A , __A )
if encoder_hid_proj_type is None:
snake_case__ : str = None
elif encoder_hid_proj_type == "linear":
snake_case__ : Optional[Any] = nn.Linear(__A , __A )
else:
raise ValueError(f'''unsupported encoder_hid_proj_type: {encoder_hid_proj_type}''' )
snake_case__ : Any = nn.Parameter(torch.zeros(1 , num_embeddings + additional_embeddings , __A ) )
if added_emb_type == "prd":
snake_case__ : int = nn.Parameter(torch.zeros(1 , 1 , __A ) )
elif added_emb_type is None:
snake_case__ : Tuple = None
else:
raise ValueError(
f'''`added_emb_type`: {added_emb_type} is not supported. Make sure to choose one of `\'prd\'` or `None`.''' )
snake_case__ : List[str] = nn.ModuleList(
[
BasicTransformerBlock(
__A , __A , __A , dropout=__A , activation_fn="gelu" , attention_bias=__A , )
for d in range(__A )
] )
if norm_in_type == "layer":
snake_case__ : Any = nn.LayerNorm(__A )
elif norm_in_type is None:
snake_case__ : str = None
else:
raise ValueError(f'''Unsupported norm_in_type: {norm_in_type}.''' )
snake_case__ : Optional[int] = nn.LayerNorm(__A )
snake_case__ : Optional[Any] = nn.Linear(__A , __A )
snake_case__ : Optional[Any] = torch.full(
[num_embeddings + additional_embeddings, num_embeddings + additional_embeddings] , -1_0_0_0_0.0 )
causal_attention_mask.triu_(1 )
snake_case__ : int = causal_attention_mask[None, ...]
self.register_buffer("causal_attention_mask" , __A , persistent=__A )
snake_case__ : Tuple = nn.Parameter(torch.zeros(1 , __A ) )
snake_case__ : Union[str, Any] = nn.Parameter(torch.zeros(1 , __A ) )
@property
# Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors
def _lowercase ( self : int ):
snake_case__ : Tuple = {}
def fn_recursive_add_processors(__A : str , __A : torch.nn.Module , __A : Dict[str, AttentionProcessor] ):
if hasattr(__A , "set_processor" ):
snake_case__ : List[str] = module.processor
for sub_name, child in module.named_children():
fn_recursive_add_processors(f'''{name}.{sub_name}''' , __A , __A )
return processors
for name, module in self.named_children():
fn_recursive_add_processors(__A , __A , __A )
return processors
def _lowercase ( self : Any , __A : Union[AttentionProcessor, Dict[str, AttentionProcessor]] ):
snake_case__ : int = len(self.attn_processors.keys() )
if isinstance(__A , __A ) and len(__A ) != count:
raise ValueError(
f'''A dict of processors was passed, but the number of processors {len(__A )} does not match the'''
f''' number of attention layers: {count}. Please make sure to pass {count} processor classes.''' )
def fn_recursive_attn_processor(__A : str , __A : torch.nn.Module , __A : Dict ):
if hasattr(__A , "set_processor" ):
if not isinstance(__A , __A ):
module.set_processor(__A )
else:
module.set_processor(processor.pop(f'''{name}.processor''' ) )
for sub_name, child in module.named_children():
fn_recursive_attn_processor(f'''{name}.{sub_name}''' , __A , __A )
for name, module in self.named_children():
fn_recursive_attn_processor(__A , __A , __A )
def _lowercase ( self : str ):
self.set_attn_processor(AttnProcessor() )
def _lowercase ( self : str , __A : Any , __A : Union[torch.Tensor, float, int] , __A : torch.FloatTensor , __A : Optional[torch.FloatTensor] = None , __A : Optional[torch.BoolTensor] = None , __A : bool = True , ):
snake_case__ : List[str] = hidden_states.shape[0]
snake_case__ : int = timestep
if not torch.is_tensor(__A ):
snake_case__ : Dict = torch.tensor([timesteps] , dtype=torch.long , device=hidden_states.device )
elif torch.is_tensor(__A ) and len(timesteps.shape ) == 0:
snake_case__ : Union[str, Any] = timesteps[None].to(hidden_states.device )
# broadcast to batch dimension in a way that's compatible with ONNX/Core ML
snake_case__ : Optional[Any] = timesteps * torch.ones(__A , dtype=timesteps.dtype , device=timesteps.device )
snake_case__ : Any = self.time_proj(__A )
# timesteps does not contain any weights and will always return f32 tensors
# but time_embedding might be fp16, so we need to cast here.
snake_case__ : int = timesteps_projected.to(dtype=self.dtype )
snake_case__ : List[str] = self.time_embedding(__A )
if self.embedding_proj_norm is not None:
snake_case__ : List[str] = self.embedding_proj_norm(__A )
snake_case__ : str = self.embedding_proj(__A )
if self.encoder_hidden_states_proj is not None and encoder_hidden_states is not None:
snake_case__ : Any = self.encoder_hidden_states_proj(__A )
elif self.encoder_hidden_states_proj is not None and encoder_hidden_states is None:
raise ValueError("`encoder_hidden_states_proj` requires `encoder_hidden_states` to be set" )
snake_case__ : List[str] = self.proj_in(__A )
snake_case__ : Union[str, Any] = self.positional_embedding.to(hidden_states.dtype )
snake_case__ : Optional[int] = []
snake_case__ : Dict = 0
if encoder_hidden_states is not None:
additional_embeds.append(__A )
additional_embeddings_len += encoder_hidden_states.shape[1]
if len(proj_embeddings.shape ) == 2:
snake_case__ : Tuple = proj_embeddings[:, None, :]
if len(hidden_states.shape ) == 2:
snake_case__ : Dict = hidden_states[:, None, :]
snake_case__ : Any = additional_embeds + [
proj_embeddings,
time_embeddings[:, None, :],
hidden_states,
]
if self.prd_embedding is not None:
snake_case__ : Any = self.prd_embedding.to(hidden_states.dtype ).expand(__A , -1 , -1 )
additional_embeds.append(__A )
snake_case__ : Dict = torch.cat(
__A , dim=1 , )
# Allow positional_embedding to not include the `addtional_embeddings` and instead pad it with zeros for these additional tokens
snake_case__ : List[Any] = additional_embeddings_len + proj_embeddings.shape[1] + 1
if positional_embeddings.shape[1] < hidden_states.shape[1]:
snake_case__ : Union[str, Any] = F.pad(
__A , (
0,
0,
additional_embeddings_len,
self.prd_embedding.shape[1] if self.prd_embedding is not None else 0,
) , value=0.0 , )
snake_case__ : List[Any] = hidden_states + positional_embeddings
if attention_mask is not None:
snake_case__ : Any = (1 - attention_mask.to(hidden_states.dtype )) * -1_0_0_0_0.0
snake_case__ : Tuple = F.pad(__A , (0, self.additional_embeddings) , value=0.0 )
snake_case__ : Dict = (attention_mask[:, None, :] + self.causal_attention_mask).to(hidden_states.dtype )
snake_case__ : List[str] = attention_mask.repeat_interleave(self.config.num_attention_heads , dim=0 )
if self.norm_in is not None:
snake_case__ : Optional[int] = self.norm_in(__A )
for block in self.transformer_blocks:
snake_case__ : Union[str, Any] = block(__A , attention_mask=__A )
snake_case__ : Optional[int] = self.norm_out(__A )
if self.prd_embedding is not None:
snake_case__ : Dict = hidden_states[:, -1]
else:
snake_case__ : Optional[int] = hidden_states[:, additional_embeddings_len:]
snake_case__ : Union[str, Any] = self.proj_to_clip_embeddings(__A )
if not return_dict:
return (predicted_image_embedding,)
return PriorTransformerOutput(predicted_image_embedding=__A )
def _lowercase ( self : Any , __A : Optional[int] ):
snake_case__ : List[str] = (prior_latents * self.clip_std) + self.clip_mean
return prior_latents
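# A hedged usage sketch (illustrative only; shapes follow the constructor defaults
# above, i.e. embedding_dim=768 and num_embeddings=77 text tokens):
#
#     import torch
#     from diffusers import PriorTransformer  # assumed top-level export
#
#     prior = PriorTransformer()
#     hidden_states = torch.randn(1, 768)              # noisy image embedding
#     proj_embedding = torch.randn(1, 768)             # pooled text embedding
#     encoder_hidden_states = torch.randn(1, 77, 768)  # per-token text embeddings
#     out = prior(hidden_states, 1, proj_embedding, encoder_hidden_states=encoder_hidden_states)
#     out.predicted_image_embedding.shape              # torch.Size([1, 768])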
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class TestActivations(unittest.TestCase):
    def test_gelu_versions(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        self.assertTrue(torch.allclose(gelu_python(x), torch_builtin(x)))
        self.assertFalse(torch.allclose(gelu_python(x), gelu_new(x)))

    def test_gelu_10(self):
        x = torch.tensor([-100, -1, -0.1, 0, 0.1, 1.0, 100])
        torch_builtin = get_activation("gelu")
        gelu10 = get_activation("gelu_10")

        y_gelu = torch_builtin(x)
        y_gelu_10 = gelu10(x)

        clipped_mask = torch.where(y_gelu_10 < 10.0, 1, 0)

        self.assertTrue(torch.max(y_gelu_10).item() == 10.0)
        self.assertTrue(torch.allclose(y_gelu * clipped_mask, y_gelu_10 * clipped_mask))

    def test_get_activation(self):
        get_activation("gelu")
        get_activation("gelu_10")
        get_activation("gelu_fast")
        get_activation("gelu_new")
        get_activation("gelu_python")
        get_activation("gelu_pytorch_tanh")
        get_activation("linear")
        get_activation("mish")
        get_activation("quick_gelu")
        get_activation("relu")
        get_activation("sigmoid")
        get_activation("silu")
        get_activation("swish")
        get_activation("tanh")
        with self.assertRaises(KeyError):
            get_activation("bogus")
        with self.assertRaises(KeyError):
            get_activation(None)

    def test_activations_are_distinct_objects(self):
        act1 = get_activation("gelu")
        act1.a = 1
        act2 = get_activation("gelu")
        # setting an attribute on one instance must not leak onto a freshly created one
        self.assertEqual(act1.a, 1)
        with self.assertRaises(AttributeError):
            _ = act2.a
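
# A small added demo (illustrative; requires torch): `gelu_10` matches GELU but clips
# activations at 10, which only bites for large positive inputs.
def _demo_gelu_10() -> None:
    x = torch.tensor([-100.0, 0.0, 100.0])
    y = get_activation("gelu_10")(x)
    assert y.max().item() == 10.0  # gelu(100) ~= 100 is clipped down to 10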
import inspect
import unittest
import warnings
from math import ceil, floor
from transformers import LevitConfig
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING,
MODEL_MAPPING,
LevitForImageClassification,
LevitForImageClassificationWithTeacher,
LevitModel,
)
from transformers.models.levit.modeling_levit import LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
def _lowercase ( self : Any ):
snake_case__ : List[Any] = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A , "hidden_sizes" ) )
self.parent.assertTrue(hasattr(__A , "num_attention_heads" ) )
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Any , __A : Optional[int] , __A : Dict=1_3 , __A : Union[str, Any]=6_4 , __A : int=3 , __A : List[str]=3 , __A : Optional[Any]=2 , __A : Dict=1 , __A : Optional[int]=1_6 , __A : Any=[1_2_8, 2_5_6, 3_8_4] , __A : List[str]=[4, 6, 8] , __A : Optional[Any]=[2, 3, 4] , __A : str=[1_6, 1_6, 1_6] , __A : Dict=0 , __A : Dict=[2, 2, 2] , __A : Dict=[2, 2, 2] , __A : Dict=0.0_2 , __A : Dict=True , __A : Dict=True , __A : str=2 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : Union[str, Any] = batch_size
snake_case__ : List[Any] = image_size
snake_case__ : Tuple = num_channels
snake_case__ : int = kernel_size
snake_case__ : Dict = stride
snake_case__ : Union[str, Any] = padding
snake_case__ : int = hidden_sizes
snake_case__ : List[str] = num_attention_heads
snake_case__ : int = depths
snake_case__ : Optional[Any] = key_dim
snake_case__ : Dict = drop_path_rate
snake_case__ : Union[str, Any] = patch_size
snake_case__ : str = attention_ratio
snake_case__ : Union[str, Any] = mlp_ratio
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Optional[Any] = [
["Subsample", key_dim[0], hidden_sizes[0] // key_dim[0], 4, 2, 2],
["Subsample", key_dim[0], hidden_sizes[1] // key_dim[0], 4, 2, 2],
]
snake_case__ : Union[str, Any] = is_training
snake_case__ : Dict = use_labels
snake_case__ : List[Any] = num_labels
snake_case__ : Dict = initializer_range
def _lowercase ( self : Tuple ):
snake_case__ : int = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
snake_case__ : Tuple = None
if self.use_labels:
snake_case__ : int = ids_tensor([self.batch_size] , self.num_labels )
snake_case__ : Dict = self.get_config()
return config, pixel_values, labels
def _lowercase ( self : Dict ):
return LevitConfig(
image_size=self.image_size , num_channels=self.num_channels , kernel_size=self.kernel_size , stride=self.stride , padding=self.padding , patch_size=self.patch_size , hidden_sizes=self.hidden_sizes , num_attention_heads=self.num_attention_heads , depths=self.depths , key_dim=self.key_dim , drop_path_rate=self.drop_path_rate , mlp_ratio=self.mlp_ratio , attention_ratio=self.attention_ratio , initializer_range=self.initializer_range , down_ops=self.down_ops , )
def _lowercase ( self : Dict , __A : int , __A : List[str] , __A : List[str] ):
snake_case__ : Any = LevitModel(config=__A )
model.to(__A )
model.eval()
snake_case__ : Optional[int] = model(__A )
snake_case__ : Optional[int] = (self.image_size, self.image_size)
snake_case__, snake_case__ : Any = image_size[0], image_size[1]
for _ in range(4 ):
snake_case__ : List[str] = floor(((height + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
snake_case__ : str = floor(((width + 2 * self.padding - self.kernel_size) / self.stride) + 1 )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, ceil(height / 4 ) * ceil(width / 4 ), self.hidden_sizes[-1]) , )
def _lowercase ( self : Dict , __A : Tuple , __A : Optional[int] , __A : Optional[Any] ):
snake_case__ : Tuple = self.num_labels
snake_case__ : Optional[Any] = LevitForImageClassification(__A )
model.to(__A )
model.eval()
snake_case__ : Dict = model(__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Any = self.prepare_config_and_inputs()
snake_case__, snake_case__, snake_case__ : Dict = config_and_inputs
snake_case__ : Any = {"pixel_values": pixel_values}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(LevitModel, LevitForImageClassification, LevitForImageClassificationWithTeacher)
if is_torch_available()
else ()
)
a_ = (
{
"feature-extraction": LevitModel,
"image-classification": (LevitForImageClassification, LevitForImageClassificationWithTeacher),
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
a_ = False
a_ = False
a_ = False
def _lowercase ( self : Any ):
snake_case__ : str = LevitModelTester(self )
snake_case__ : Optional[Any] = ConfigTester(self , config_class=__A , has_text_modality=__A , hidden_size=3_7 )
def _lowercase ( self : List[str] ):
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _lowercase ( self : Optional[int] ):
return
@unittest.skip(reason="Levit does not use inputs_embeds" )
def _lowercase ( self : Dict ):
pass
@unittest.skip(reason="Levit does not support input and output embeddings" )
def _lowercase ( self : str ):
pass
@unittest.skip(reason="Levit does not output attentions" )
def _lowercase ( self : Optional[int] ):
pass
def _lowercase ( self : int ):
snake_case__, snake_case__ : str = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Optional[int] = model_class(__A )
snake_case__ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
snake_case__ : Optional[int] = [*signature.parameters.keys()]
snake_case__ : Tuple = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __A )
def _lowercase ( self : Tuple ):
def check_hidden_states_output(__A : Any , __A : Dict , __A : Any ):
snake_case__ : int = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
snake_case__ : List[Any] = model(**self._prepare_for_class(__A , __A ) )
snake_case__ : Optional[Any] = outputs.hidden_states
snake_case__ : int = len(self.model_tester.depths ) + 1
self.assertEqual(len(__A ) , __A )
snake_case__ : Dict = (self.model_tester.image_size, self.model_tester.image_size)
snake_case__, snake_case__ : str = image_size[0], image_size[1]
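            # mirror the four strided patch-embedding convolutions to recover the first feature map's resolution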
for _ in range(4 ):
snake_case__ : Dict = floor(
(
(height + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
snake_case__ : Union[str, Any] = floor(
(
(width + 2 * self.model_tester.padding - self.model_tester.kernel_size)
/ self.model_tester.stride
)
+ 1 )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [
height * width,
self.model_tester.hidden_sizes[0],
] , )
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
snake_case__ : Any = True
check_hidden_states_output(__A , __A , __A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
snake_case__ : List[Any] = True
check_hidden_states_output(__A , __A , __A )
@unittest.skip("Will be fixed soon by reducing the size of the model used for common tests." )
def _lowercase ( self : Tuple ):
pass
def _lowercase ( self : List[str] , __A : str , __A : Optional[int] , __A : Optional[int]=False ):
snake_case__ : Optional[Any] = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
del inputs_dict["labels"]
return inputs_dict
def _lowercase ( self : List[str] ):
snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : Tuple ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__A )
def _lowercase ( self : List[Any] ):
if not self.model_tester.is_training:
return
snake_case__, snake_case__ : List[Any] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Union[str, Any] = True
for model_class in self.all_model_classes:
# LevitForImageClassificationWithTeacher supports inference-only
if (
model_class in get_values(__A )
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
snake_case__ : List[str] = model_class(__A )
model.to(__A )
model.train()
snake_case__ : Dict = self._prepare_for_class(__A , __A , return_labels=__A )
snake_case__ : Tuple = model(**__A ).loss
loss.backward()
def _lowercase ( self : Tuple ):
snake_case__, snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
snake_case__ : str = False
snake_case__ : List[str] = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ) or not model_class.supports_gradient_checkpointing:
continue
# LevitForImageClassificationWithTeacher supports inference-only
if model_class.__name__ == "LevitForImageClassificationWithTeacher":
continue
snake_case__ : Union[str, Any] = model_class(__A )
model.gradient_checkpointing_enable()
model.to(__A )
model.train()
snake_case__ : int = self._prepare_for_class(__A , __A , return_labels=__A )
snake_case__ : Tuple = model(**__A ).loss
loss.backward()
def _lowercase ( self : Dict ):
snake_case__, snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
snake_case__ : Tuple = [
{"title": "multi_label_classification", "num_labels": 2, "dtype": torch.float},
{"title": "single_label_classification", "num_labels": 1, "dtype": torch.long},
{"title": "regression", "num_labels": 1, "dtype": torch.float},
]
for model_class in self.all_model_classes:
if (
model_class
not in [
*get_values(__A ),
]
or model_class.__name__ == "LevitForImageClassificationWithTeacher"
):
continue
for problem_type in problem_types:
with self.subTest(msg=f'''Testing {model_class} with {problem_type['title']}''' ):
snake_case__ : Optional[Any] = problem_type["title"]
snake_case__ : str = problem_type["num_labels"]
snake_case__ : int = model_class(__A )
model.to(__A )
model.train()
snake_case__ : Optional[int] = self._prepare_for_class(__A , __A , return_labels=__A )
if problem_type["num_labels"] > 1:
snake_case__ : Optional[int] = inputs["labels"].unsqueeze(1 ).repeat(1 , problem_type["num_labels"] )
snake_case__ : List[Any] = inputs["labels"].to(problem_type["dtype"] )
                    # This tests that we do not trigger the warning from PyTorch "Using a target size that is different
                    # to the input size. This will likely lead to incorrect results due to broadcasting. Please ensure
                    # they have the same size." which is a symptom that something is wrong for the regression problem.
# See https://github.com/huggingface/transformers/issues/11780
with warnings.catch_warnings(record=__A ) as warning_list:
snake_case__ : List[str] = model(**__A ).loss
for w in warning_list:
if "Using a target size that is different to the input size" in str(w.message ):
raise ValueError(
f'''Something is going wrong in the regression problem: intercepted {w.message}''' )
loss.backward()
@slow
def _lowercase ( self : List[str] ):
for model_name in LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : int = LevitModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
return image
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : Dict ):
return LevitImageProcessor.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] )
@slow
def _lowercase ( self : Optional[Any] ):
snake_case__ : Dict = LevitForImageClassificationWithTeacher.from_pretrained(LEVIT_PRETRAINED_MODEL_ARCHIVE_LIST[0] ).to(
__A )
snake_case__ : Any = self.default_image_processor
snake_case__ : List[Any] = prepare_img()
snake_case__ : Optional[int] = image_processor(images=__A , return_tensors="pt" ).to(__A )
# forward pass
with torch.no_grad():
snake_case__ : Optional[int] = model(**__A )
# verify the logits
snake_case__ : Optional[int] = torch.Size((1, 1_0_0_0) )
self.assertEqual(outputs.logits.shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([1.0_4_4_8, -0.3_7_4_5, -1.8_3_1_7] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __A , atol=1e-4 ) )
| 25 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
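# maps fairseq checkpoint parameter prefixes to their Hugging Face UniSpeechSat counterparts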
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
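# weights that live at the top level of the HF model rather than under the `unispeech_sat.` prefix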
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
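# walk `key` as a dot-separated attribute path on the HF model, check shapes, and copy `value` into the matching parameter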
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
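# copy every tensor from the fairseq state dict into the HF model, collecting and logging any unused weights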
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
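# the feature-extractor conv stack is handled separately: `type_id` 0 is a conv layer, `type_id` 2 a layer norm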
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
            logger.info(F'''Feat extract layer norm bias of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
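# entry point: load the fairseq checkpoint, map its weights into a fresh HF UniSpeechSat model, and save it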
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
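    # example invocation (script name and paths are illustrative):
    #   python convert_unispeech_sat_checkpoint.py --checkpoint_path ./fairseq_model.pt \
    #       --dict_path ./dict --pytorch_dump_folder_path ./unispeech-sat-hf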
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 25 | 1 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCamelCase : List[Any] = concatenate_datasets
__lowerCamelCase : List[str] = DownloadConfig
__lowerCamelCase : Union[str, Any] = DownloadManager
__lowerCamelCase : str = DownloadMode
__lowerCamelCase : Union[str, Any] = DownloadConfig
__lowerCamelCase : List[str] = DownloadMode
__lowerCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 25 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
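# assemble the full kwargs for a M2M100 forward pass, creating default attention and head masks when omitted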
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
        # this is because for M2M100 the position_ids are prepared such that
        # all pad tokens have pos id = 2 and the rest are between 2..seq_length,
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
        # position_ids being off by num_pad_tokens in past input
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
snake_case__, snake_case__ : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
        # The article below tests that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 25 | 1 |
import math
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(snake_case_ ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
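# Returns the side length of the square spiral whose diagonal prime ratio first drops
# below `ratio` (cf. Project Euler problem 58); each pass adds the three non-square
# corners of the next ring, j*j + k*(j + 1) for k in 1..3.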
def SCREAMING_SNAKE_CASE ( snake_case_ : float = 0.1 ):
snake_case__ : Dict = 3
snake_case__ : Any = 3
while primes / (2 * j - 1) >= ratio:
for i in range(j * j + j + 1 , (j + 2) * (j + 2) , j + 1 ):
primes += is_prime(snake_case_ )
j += 2
return j
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
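# collect rows partition by partition in the given order, returning the (row_id, row_dict) pairs an iterable over `df` is expected to yield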
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : Optional[int] = []
for part_id in partition_order:
snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 )
snake_case__ : Any = Spark(snake_case_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 )
snake_case__ : Optional[Any] = [1, 0]
snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions.
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case__, snake_case__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[int] = spark.range(10 ).repartition(1 )
snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
snake_case__ : Union[str, Any] = lambda snake_case_ : x.reverse()
snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(100 ).repartition(1 )
snake_case__ : Union[str, Any] = Spark(snake_case_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
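# `_import_structure` maps each submodule to its exported names; `_LazyModule` defers the real imports until first attribute access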
__lowerCamelCase : Tuple = {"""configuration_reformer""": ["""REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP""", """ReformerConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = ["""ReformerTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = ["""ReformerTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[str] = [
"""REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""ReformerAttention""",
"""ReformerForMaskedLM""",
"""ReformerForQuestionAnswering""",
"""ReformerForSequenceClassification""",
"""ReformerLayer""",
"""ReformerModel""",
"""ReformerModelWithLMHead""",
"""ReformerPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_reformer import REFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, ReformerConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer import ReformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_reformer_fast import ReformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_reformer import (
REFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
ReformerAttention,
ReformerForMaskedLM,
ReformerForQuestionAnswering,
ReformerForSequenceClassification,
ReformerLayer,
ReformerModel,
ReformerModelWithLMHead,
ReformerPreTrainedModel,
)
else:
import sys
__lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 1 |
from __future__ import annotations
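# Slowsort ("multiply and surrender"): recursively sort both halves, swap the maximum
# to the end, then recurse on everything before it; deliberately inefficient and of
# pedagogical interest only.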
def SCREAMING_SNAKE_CASE ( snake_case_ : list , snake_case_ : int | None = None , snake_case_ : int | None = None ):
if start is None:
snake_case__ : Tuple = 0
if end is None:
snake_case__ : List[str] = len(snake_case_ ) - 1
if start >= end:
return
snake_case__ : int = (start + end) // 2
slowsort(snake_case_ , snake_case_ , snake_case_ )
slowsort(snake_case_ , mid + 1 , snake_case_ )
if sequence[end] < sequence[mid]:
snake_case__, snake_case__ : Union[str, Any] = sequence[mid], sequence[end]
slowsort(snake_case_ , snake_case_ , end - 1 )
if __name__ == "__main__":
from doctest import testmod
testmod()
| 25 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_imgaimg import KandinskyImgaImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
| 25 | 1 |
from __future__ import annotations
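# classic two-pointer scan over a sorted list: return the indices of the pair summing to `target`, or [] if none exists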
def SCREAMING_SNAKE_CASE ( snake_case_ : list[int] , snake_case_ : int ):
snake_case__ : List[str] = 0
snake_case__ : List[str] = len(snake_case_ ) - 1
while i < j:
if nums[i] + nums[j] == target:
return [i, j]
elif nums[i] + nums[j] < target:
snake_case__ : Optional[Any] = i + 1
else:
snake_case__ : List[str] = j - 1
return []
if __name__ == "__main__":
import doctest
doctest.testmod()
print(f"{two_pointer([2, 7, 11, 15], 9) = }")
| 25 |
import numpy as np
from matplotlib import pyplot as plt
from sklearn.datasets import load_iris
from sklearn.metrics import ConfusionMatrixDisplay
from sklearn.model_selection import train_test_split
from xgboost import XGBClassifier
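# split a scikit-learn Bunch into (features, targets)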
def SCREAMING_SNAKE_CASE ( snake_case_ : dict ):
return (data["data"], data["target"])
def SCREAMING_SNAKE_CASE ( snake_case_ : np.ndarray , snake_case_ : np.ndarray ):
snake_case__ : Optional[int] = XGBClassifier()
classifier.fit(snake_case_ , snake_case_ )
return classifier
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = load_iris()
snake_case__, snake_case__ : str = data_handling(snake_case_ )
snake_case__, snake_case__, snake_case__, snake_case__ : int = train_test_split(
snake_case_ , snake_case_ , test_size=0.25 )
snake_case__ : Dict = iris["target_names"]
# Create an XGBoost Classifier from the training data
snake_case__ : Dict = xgboost(snake_case_ , snake_case_ )
# Display the confusion matrix of the classifier with both training and test sets
ConfusionMatrixDisplay.from_estimator(
snake_case_ , snake_case_ , snake_case_ , display_labels=snake_case_ , cmap="Blues" , normalize="true" , )
plt.title("Normalized Confusion Matrix - IRIS Dataset" )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
main()
| 25 | 1 |
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
EulerAncestralDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
StableDiffusionInstructPixaPixPipeline,
UNetaDConditionModel,
)
from diffusers.image_processor import VaeImageProcessor
from diffusers.utils import floats_tensor, load_image, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..pipeline_params import (
IMAGE_TO_IMAGE_IMAGE_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = StableDiffusionInstructPixaPixPipeline
a_ = TEXT_GUIDED_IMAGE_VARIATION_PARAMS - {"height", "width", "cross_attention_kwargs"}
a_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
a_ = IMAGE_TO_IMAGE_IMAGE_PARAMS
def _lowercase ( self : str ):
torch.manual_seed(0 )
snake_case__ : Union[str, Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=8 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
snake_case__ : Any = PNDMScheduler(skip_prk_steps=__A )
torch.manual_seed(0 )
snake_case__ : str = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
torch.manual_seed(0 )
snake_case__ : Tuple = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
snake_case__ : int = CLIPTextModel(__A )
snake_case__ : Union[str, Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
snake_case__ : Any = {
"unet": unet,
"scheduler": scheduler,
"vae": vae,
"text_encoder": text_encoder,
"tokenizer": tokenizer,
"safety_checker": None,
"feature_extractor": None,
}
return components
def _lowercase ( self : Union[str, Any] , __A : Dict , __A : Optional[Any]=0 ):
snake_case__ : List[Any] = floats_tensor((1, 3, 3_2, 3_2) , rng=random.Random(__A ) ).to(__A )
snake_case__ : str = image.cpu().permute(0 , 2 , 3 , 1 )[0]
snake_case__ : int = Image.fromarray(np.uinta(__A ) ).convert("RGB" )
if str(__A ).startswith("mps" ):
snake_case__ : List[Any] = torch.manual_seed(__A )
else:
snake_case__ : List[Any] = torch.Generator(device=__A ).manual_seed(__A )
snake_case__ : Optional[Any] = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 2,
"guidance_scale": 6.0,
"image_guidance_scale": 1,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : Dict ):
snake_case__ : Tuple = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : int = self.get_dummy_components()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : int = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Tuple = self.get_dummy_inputs(__A )
snake_case__ : List[str] = sd_pipe(**__A ).images
snake_case__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : int = np.array([0.7_5_2_6, 0.3_7_5_0, 0.4_5_4_7, 0.6_1_1_7, 0.5_8_6_6, 0.5_0_1_6, 0.4_3_2_7, 0.5_6_4_2, 0.4_8_1_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[Any] = self.get_dummy_components()
snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : Optional[int] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : int = self.get_dummy_inputs(__A )
snake_case__ : Optional[int] = "french fries"
snake_case__ : List[Any] = sd_pipe(**__A , negative_prompt=__A )
snake_case__ : str = output.images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : Any = np.array([0.7_5_1_1, 0.3_6_4_2, 0.4_5_5_3, 0.6_2_3_6, 0.5_7_9_7, 0.5_0_1_3, 0.4_3_4_3, 0.5_6_1_1, 0.4_8_3_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : int ):
snake_case__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[Any] = self.get_dummy_components()
snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : int = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = self.get_dummy_inputs(__A )
snake_case__ : Optional[Any] = [inputs["prompt"]] * 2
snake_case__ : List[Any] = np.array(inputs["image"] ).astype(np.floataa ) / 2_5_5.0
snake_case__ : Tuple = torch.from_numpy(__A ).unsqueeze(0 ).to(__A )
snake_case__ : List[str] = image / 2 + 0.5
snake_case__ : List[Any] = image.permute(0 , 3 , 1 , 2 )
snake_case__ : str = image.repeat(2 , 1 , 1 , 1 )
snake_case__ : str = sd_pipe(**__A ).images
snake_case__ : List[Any] = image[-1, -3:, -3:, -1]
assert image.shape == (2, 3_2, 3_2, 3)
snake_case__ : Union[str, Any] = np.array([0.5_8_1_2, 0.5_7_4_8, 0.5_2_2_2, 0.5_9_0_8, 0.5_6_9_5, 0.7_1_7_4, 0.6_8_0_4, 0.5_5_2_3, 0.5_5_7_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : List[str] ):
snake_case__ : List[str] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : Any = self.get_dummy_components()
snake_case__ : Optional[Any] = EulerAncestralDiscreteScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" )
snake_case__ : Optional[int] = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : Tuple = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : int = self.get_dummy_inputs(__A )
snake_case__ : Dict = sd_pipe(**__A ).images
snake_case__ : int = image[0, -3:, -3:, -1]
snake_case__ : Tuple = [round(__A , 4 ) for x in image_slice.flatten().tolist()]
print(",".join([str(__A ) for x in slice] ) )
assert image.shape == (1, 3_2, 3_2, 3)
snake_case__ : Union[str, Any] = np.array([0.7_4_1_7, 0.3_8_4_2, 0.4_7_3_2, 0.5_7_7_6, 0.5_8_9_1, 0.5_1_3_9, 0.4_0_5_2, 0.5_6_7_3, 0.4_9_8_6] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-3
def _lowercase ( self : Any ):
super().test_inference_batch_single_identical(expected_max_diff=3e-3 )
def _lowercase ( self : int ):
snake_case__ : Union[str, Any] = self.get_dummy_components()
snake_case__ : str = StableDiffusionInstructPixaPixPipeline(**__A )
snake_case__ : List[Any] = VaeImageProcessor(do_resize=__A , do_normalize=__A )
snake_case__ : Optional[int] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
snake_case__ : Tuple = pipe(**self.get_dummy_inputs_by_type(__A , input_image_type="pt" ) )[0]
snake_case__ : List[str] = components["vae"]
snake_case__ : List[str] = self.get_dummy_inputs_by_type(__A , input_image_type="pt" )
for image_param in self.image_latents_params:
if image_param in inputs.keys():
snake_case__ : int = vae.encode(inputs[image_param] ).latent_dist.mode()
snake_case__ : List[str] = pipe(**__A )[0]
snake_case__ : Optional[Any] = np.abs(out - out_latents_inputs ).max()
self.assertLess(__A , 1e-4 , "passing latents as image input generate different result from passing image" )
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Optional[Any] ):
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Optional[Any] , __A : Union[str, Any]=0 ):
snake_case__ : List[str] = torch.manual_seed(__A )
snake_case__ : int = load_image(
"https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_pix2pix/example.jpg" )
snake_case__ : Any = {
"prompt": "turn him into a cyborg",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"image_guidance_scale": 1.0,
"output_type": "numpy",
}
return inputs
def _lowercase ( self : Dict ):
snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : List[str] = self.get_inputs()
snake_case__ : List[Any] = pipe(**__A ).images
snake_case__ : Optional[int] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Union[str, Any] = np.array([0.5_9_0_2, 0.6_0_1_5, 0.6_0_2_7, 0.5_9_8_3, 0.6_0_9_2, 0.6_0_6_1, 0.5_7_6_5, 0.5_7_8_5, 0.5_5_5_5] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : Any ):
snake_case__ : List[str] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : Union[str, Any] = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Optional[Any] = self.get_inputs()
snake_case__ : str = pipe(**__A ).images
snake_case__ : Any = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Union[str, Any] = np.array([0.6_5_7_8, 0.6_8_1_7, 0.6_9_7_2, 0.6_7_6_1, 0.6_8_5_6, 0.6_9_1_6, 0.6_4_2_8, 0.6_5_1_6, 0.6_3_0_1] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : Dict ):
snake_case__ : Dict = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A )
snake_case__ : Dict = DDIMScheduler.from_config(pipe.scheduler.config )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : List[str] = self.get_inputs()
snake_case__ : List[str] = pipe(**__A ).images
snake_case__ : Optional[Any] = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Optional[int] = np.array([0.3_8_2_8, 0.3_8_3_4, 0.3_8_1_8, 0.3_7_9_2, 0.3_8_6_5, 0.3_7_5_2, 0.3_7_9_2, 0.3_8_4_7, 0.3_7_5_3] )
assert np.abs(expected_slice - image_slice ).max() < 1e-3
def _lowercase ( self : Optional[int] ):
snake_case__ : Optional[Any] = 0
def callback_fn(__A : int , __A : int , __A : torch.FloatTensor ) -> None:
callback_fn.has_been_called = True
nonlocal number_of_steps
number_of_steps += 1
if step == 1:
snake_case__ : str = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : List[str] = latents[0, -3:, -3:, -1]
snake_case__ : int = np.array([-0.2_4_6_3, -0.4_6_4_4, -0.9_7_5_6, 1.5_1_7_6, 1.4_4_1_4, 0.7_8_6_6, 0.9_8_9_7, 0.8_5_2_1, 0.7_9_8_3] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
elif step == 2:
snake_case__ : Tuple = latents.detach().cpu().numpy()
assert latents.shape == (1, 4, 6_4, 6_4)
snake_case__ : List[str] = latents[0, -3:, -3:, -1]
snake_case__ : List[str] = np.array([-0.2_6_4_4, -0.4_6_2_6, -0.9_6_5_3, 1.5_1_7_6, 1.4_5_5_1, 0.7_6_8_6, 0.9_8_0_5, 0.8_4_5_2, 0.8_1_1_5] )
assert np.abs(latents_slice.flatten() - expected_slice ).max() < 5e-2
snake_case__ : Union[str, Any] = False
snake_case__ : int = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : List[Any] = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Optional[int] = self.get_inputs()
pipe(**__A , callback=__A , callback_steps=1 )
assert callback_fn.has_been_called
assert number_of_steps == 3
def _lowercase ( self : Tuple ):
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
snake_case__ : List[Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
"timbrooks/instruct-pix2pix" , safety_checker=__A , torch_dtype=torch.floataa )
snake_case__ : Any = pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
snake_case__ : Optional[Any] = self.get_inputs()
snake_case__ : Dict = pipe(**__A )
snake_case__ : Dict = torch.cuda.max_memory_allocated()
# make sure that less than 2.2 GB is allocated
assert mem_bytes < 2.2 * 1_0**9
def _lowercase ( self : int ):
snake_case__ : int = self.get_inputs()
# resize to resolution that is divisible by 8 but not 16 or 32
snake_case__ : Optional[Any] = inputs["image"].resize((5_0_4, 5_0_4) )
snake_case__ : List[str] = "timbrooks/instruct-pix2pix"
snake_case__ : Union[str, Any] = StableDiffusionInstructPixaPixPipeline.from_pretrained(
__A , safety_checker=__A , )
pipe.to(__A )
pipe.set_progress_bar_config(disable=__A )
pipe.enable_attention_slicing()
snake_case__ : Union[str, Any] = pipe(**__A )
snake_case__ : Union[str, Any] = output.images[0]
snake_case__ : str = image[2_5_5:2_5_8, 3_8_3:3_8_6, -1]
assert image.shape == (5_0_4, 5_0_4, 3)
snake_case__ : Optional[int] = np.array([0.2_7_2_6, 0.2_5_2_9, 0.2_6_6_4, 0.2_6_5_5, 0.2_6_4_1, 0.2_6_4_2, 0.2_5_9_1, 0.2_6_4_9, 0.2_5_9_0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-3
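# Hedged usage sketch (added for illustration; not part of the test suite): the slow
# tests above all follow the same pattern, shown in plain form below. The checkpoint
# id, prompt and parameters are the ones from `get_inputs`.
#
# pipe = StableDiffusionInstructPixaPixPipeline.from_pretrained("timbrooks/instruct-pix2pix")
# pipe.enable_attention_slicing()
# images = pipe(prompt="turn him into a cyborg", image=image, num_inference_steps=3,
# guidance_scale=7.5, image_guidance_scale=1.0, output_type="numpy").images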
| 25 |
import argparse
import re
from typing import Dict
import torch
from datasets import Audio, Dataset, load_dataset, load_metric
from transformers import AutoFeatureExtractor, pipeline
def SCREAMING_SNAKE_CASE ( snake_case_ : Dataset , snake_case_ : Dict[str, str] ):
snake_case__ : Tuple = args.log_outputs
snake_case__ : Union[str, Any] = "_".join(args.dataset.split("/" ) + [args.config, args.split] )
# load metric
snake_case__ : List[str] = load_metric("wer" )
snake_case__ : List[str] = load_metric("cer" )
# compute metrics
snake_case__ : List[Any] = wer.compute(references=result["target"] , predictions=result["prediction"] )
snake_case__ : List[str] = cer.compute(references=result["target"] , predictions=result["prediction"] )
# print & log results
snake_case__ : Dict = F'''WER: {wer_result}\nCER: {cer_result}'''
print(snake_case_ )
with open(F'''{dataset_id}_eval_results.txt''' , "w" ) as f:
f.write(snake_case_ )
# log all results in text file. Possibly interesting for analysis
if log_outputs is not None:
snake_case__ : Union[str, Any] = F'''log_{dataset_id}_predictions.txt'''
snake_case__ : int = F'''log_{dataset_id}_targets.txt'''
with open(snake_case_ , "w" ) as p, open(snake_case_ , "w" ) as t:
# mapping function to write output
def write_to_file(snake_case_ : Union[str, Any] , snake_case_ : Any ):
p.write(F'''{i}''' + "\n" )
p.write(batch["prediction"] + "\n" )
t.write(F'''{i}''' + "\n" )
t.write(batch["target"] + "\n" )
result.map(snake_case_ , with_indices=snake_case_ )
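# Note (added): `wer` and `cer` above are the word- and character-error-rate metrics
# from `datasets.load_metric`; both compare `result["prediction"]` against
# `result["target"]` over the whole split, and lower values are better.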
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : List[Any] = "[,?.!\-\;\:\"“%‘”�—’…–]" # noqa: W605 IMPORTANT: this should correspond to the chars that were ignored during training
snake_case__ : Optional[int] = re.sub(snake_case_ , "" , text.lower() )
# In addition, we can normalize the target text, e.g. removing new lines characters etc...
# note that order is important here!
snake_case__ : Optional[Any] = ["\n\n", "\n", "   ", "  "]
for t in token_sequences_to_ignore:
snake_case__ : Optional[int] = " ".join(text.split(snake_case_ ) )
return text
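# Hedged sanity check (added; assumes the helper above is exposed as `normalize_text`,
# the name used at its call site in `map_to_pred` below):
# normalize_text("Hello, WORLD!\n\nfoo  bar") -> "hello world foo bar"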
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
# load dataset
snake_case__ : int = load_dataset(args.dataset , args.config , split=args.split , use_auth_token=snake_case_ )
# for testing: only process the first two examples as a test
# dataset = dataset.select(range(10))
# load processor
snake_case__ : List[str] = AutoFeatureExtractor.from_pretrained(args.model_id )
snake_case__ : List[Any] = feature_extractor.sampling_rate
# resample audio
snake_case__ : Dict = dataset.cast_column("audio" , Audio(sampling_rate=snake_case_ ) )
# load eval pipeline
if args.device is None:
snake_case__ : int = 0 if torch.cuda.is_available() else -1
snake_case__ : List[str] = pipeline("automatic-speech-recognition" , model=args.model_id , device=args.device )
# map function to decode audio
def map_to_pred(snake_case_ : Any ):
snake_case__ : Union[str, Any] = asr(
batch["audio"]["array"] , chunk_length_s=args.chunk_length_s , stride_length_s=args.stride_length_s )
snake_case__ : Optional[int] = prediction["text"]
snake_case__ : Optional[Any] = normalize_text(batch["sentence"] )
return batch
# run inference on all examples
snake_case__ : Any = dataset.map(snake_case_ , remove_columns=dataset.column_names )
# compute and log_results
# do not change function below
log_results(snake_case_ , snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : Dict = argparse.ArgumentParser()
parser.add_argument(
"""--model_id""", type=str, required=True, help="""Model identifier. Should be loadable with 🤗 Transformers"""
)
parser.add_argument(
"""--dataset""",
type=str,
required=True,
help="""Dataset name to evaluate the `model_id`. Should be loadable with 🤗 Datasets""",
)
parser.add_argument(
"""--config""", type=str, required=True, help="""Config of the dataset. *E.g.* `'en'` for Common Voice"""
)
parser.add_argument("""--split""", type=str, required=True, help="""Split of the dataset. *E.g.* `'test'`""")
parser.add_argument(
"""--chunk_length_s""", type=float, default=None, help="""Chunk length in seconds. Defaults to 5 seconds."""
)
parser.add_argument(
"""--stride_length_s""", type=float, default=None, help="""Stride of the audio chunks. Defaults to 1 second."""
)
parser.add_argument(
"""--log_outputs""", action="""store_true""", help="""If defined, write outputs to log file for analysis."""
)
parser.add_argument(
"""--device""",
type=int,
default=None,
help="""The device to run the pipeline on. -1 for CPU (default), 0 for the first GPU and so on.""",
)
__lowerCamelCase : str = parser.parse_args()
main(args)
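# Example invocation (added; the model and dataset ids are illustrative):
# python eval.py --model_id facebook/wav2vec2-base-960h \
#     --dataset mozilla-foundation/common_voice_8_0 --config en --split test --log_outputs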
| 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 |
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=UpperCamelCase_ )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = field(default="text-classification" , metadata={"include_in_asdict_even_if_is_default": True} )
a_ = Features({"text": Value("string" )} )
a_ = Features({"labels": ClassLabel} )
a_ = "text"
a_ = "labels"
def _lowercase ( self : Tuple , __A : List[Any] ):
if self.label_column not in features:
raise ValueError(f'''Column {self.label_column} is not present in features.''' )
if not isinstance(features[self.label_column] , __A ):
raise ValueError(f'''Column {self.label_column} is not a ClassLabel.''' )
snake_case__ : Any = copy.deepcopy(self )
snake_case__ : Optional[Any] = self.label_schema.copy()
snake_case__ : List[str] = features[self.label_column]
snake_case__ : Dict = label_schema
return task_template
@property
def _lowercase ( self : Tuple ):
return {
self.text_column: "text",
self.label_column: "labels",
}
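# Hedged usage sketch (added; `TextClassification` and `align_with_features` are the
# public `datasets` names that this obfuscated template mirrors):
# features = Features({"review": Value("string"), "stars": ClassLabel(names=["neg", "pos"])})
# task = TextClassification(text_column="review", label_column="stars")
# task = task.align_with_features(features)  # copies the dataset's ClassLabel into the label schema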
| 25 | 1 |
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion_safe import StableDiffusionPipelineSafe as StableDiffusionPipeline
from diffusers.utils import floats_tensor, nightly, torch_device
from diffusers.utils.testing_utils import require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _lowercase ( self : int ):
snake_case__ : Any = 1
snake_case__ : Dict = 3
snake_case__ : Union[str, Any] = (3_2, 3_2)
snake_case__ : Union[str, Any] = floats_tensor((batch_size, num_channels) + sizes , rng=random.Random(0 ) ).to(__A )
return image
@property
def _lowercase ( self : List[str] ):
torch.manual_seed(0 )
snake_case__ : List[Any] = UNetaDConditionModel(
block_out_channels=(3_2, 6_4) , layers_per_block=2 , sample_size=3_2 , in_channels=4 , out_channels=4 , down_block_types=("DownBlock2D", "CrossAttnDownBlock2D") , up_block_types=("CrossAttnUpBlock2D", "UpBlock2D") , cross_attention_dim=3_2 , )
return model
@property
def _lowercase ( self : str ):
torch.manual_seed(0 )
snake_case__ : Dict = AutoencoderKL(
block_out_channels=[3_2, 6_4] , in_channels=3 , out_channels=3 , down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"] , up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"] , latent_channels=4 , )
return model
@property
def _lowercase ( self : Optional[Any] ):
torch.manual_seed(0 )
snake_case__ : List[str] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=3_2 , intermediate_size=3_7 , layer_norm_eps=1e-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1_0_0_0 , )
return CLIPTextModel(__A )
@property
def _lowercase ( self : int ):
def extract(*__A : str , **__A : List[str] ):
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Optional[Any] ):
snake_case__ : str = torch.ones([0] )
def _lowercase ( self : Any , __A : str ):
self.pixel_values.to(__A )
return self
return Out()
return extract
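# Note (added): the stand-in extractor above returns an object whose `pixel_values`
# is an empty tensor and whose device-move method simply returns the object itself,
# which is all the safety-checker code path touches in these unit tests.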
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Optional[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : Optional[Any] = self.dummy_cond_unet
snake_case__ : Optional[Any] = DDIMScheduler(
beta_start=0.0_0_0_8_5 , beta_end=0.0_1_2 , beta_schedule="scaled_linear" , clip_sample=__A , set_alpha_to_one=__A , )
snake_case__ : Dict = self.dummy_vae
snake_case__ : Union[str, Any] = self.dummy_text_encoder
snake_case__ : Dict = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
snake_case__ : Dict = StableDiffusionPipeline(
unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
snake_case__ : Any = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : str = "A painting of a squirrel eating a burger"
snake_case__ : Optional[int] = torch.Generator(device=__A ).manual_seed(0 )
snake_case__ : Optional[Any] = sd_pipe([prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
snake_case__ : int = output.images
snake_case__ : Dict = torch.Generator(device=__A ).manual_seed(0 )
snake_case__ : int = sd_pipe(
[prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__A , )[0]
snake_case__ : Tuple = image[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : int = np.array([0.5_7_5_6, 0.6_1_1_8, 0.5_0_0_5, 0.5_0_4_1, 0.5_4_7_1, 0.4_7_2_6, 0.4_9_7_6, 0.4_8_6_5, 0.4_8_6_4] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Dict ):
snake_case__ : List[Any] = "cpu" # ensure determinism for the device-dependent torch.Generator
snake_case__ : Any = self.dummy_cond_unet
snake_case__ : List[Any] = PNDMScheduler(skip_prk_steps=__A )
snake_case__ : List[Any] = self.dummy_vae
snake_case__ : Tuple = self.dummy_text_encoder
snake_case__ : Optional[Any] = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# make sure here that pndm scheduler skips prk
snake_case__ : Dict = StableDiffusionPipeline(
unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
snake_case__ : List[str] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : List[str] = "A painting of a squirrel eating a burger"
snake_case__ : Union[str, Any] = torch.Generator(device=__A ).manual_seed(0 )
snake_case__ : Optional[int] = sd_pipe([prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" )
snake_case__ : Tuple = output.images
snake_case__ : int = torch.Generator(device=__A ).manual_seed(0 )
snake_case__ : str = sd_pipe(
[prompt] , generator=__A , guidance_scale=6.0 , num_inference_steps=2 , output_type="np" , return_dict=__A , )[0]
snake_case__ : List[str] = image[0, -3:, -3:, -1]
snake_case__ : Optional[int] = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 6_4, 6_4, 3)
snake_case__ : Dict = np.array([0.5_1_2_5, 0.5_7_1_6, 0.4_8_2_8, 0.5_0_6_0, 0.5_6_5_0, 0.4_7_6_8, 0.5_1_8_5, 0.4_8_9_5, 0.4_9_9_3] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : str ):
snake_case__ : Dict = StableDiffusionPipeline.from_pretrained(
"hf-internal-testing/tiny-stable-diffusion-lms-pipe" , safety_checker=__A )
assert isinstance(__A , __A )
assert isinstance(pipe.scheduler , __A )
assert pipe.safety_checker is None
snake_case__ : List[str] = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
# check that there's no error when saving a pipeline with one of the models being None
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__A )
snake_case__ : Optional[int] = StableDiffusionPipeline.from_pretrained(__A )
# sanity check that the pipeline still works
assert pipe.safety_checker is None
snake_case__ : Dict = pipe("example prompt" , num_inference_steps=2 ).images[0]
assert image is not None
@unittest.skipIf(torch_device != "cuda" , "This test requires a GPU" )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Any = self.dummy_cond_unet
snake_case__ : List[str] = PNDMScheduler(skip_prk_steps=__A )
snake_case__ : List[Any] = self.dummy_vae
snake_case__ : Union[str, Any] = self.dummy_text_encoder
snake_case__ : str = CLIPTokenizer.from_pretrained("hf-internal-testing/tiny-random-clip" )
# put models in fp16
snake_case__ : List[Any] = unet.half()
snake_case__ : Dict = vae.half()
snake_case__ : str = bert.half()
# make sure here that pndm scheduler skips prk
snake_case__ : Dict = StableDiffusionPipeline(
unet=__A , scheduler=__A , vae=__A , text_encoder=__A , tokenizer=__A , safety_checker=__A , feature_extractor=self.dummy_extractor , )
snake_case__ : Tuple = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : List[str] = "A painting of a squirrel eating a burger"
snake_case__ : Optional[Any] = sd_pipe([prompt] , num_inference_steps=2 , output_type="np" ).images
assert image.shape == (1, 6_4, 6_4, 3)
@nightly
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Union[str, Any] ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : List[Any] ):
snake_case__ : Dict = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__A )
snake_case__ : Union[str, Any] = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case__ : Dict = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Optional[int] = (
"portrait of girl with smokey eyes makeup in abandoned hotel, grange clothes, redshift, wide high angle"
" coloured polaroid photograph with flash, kodak film, hyper real, stunning moody cinematography, with"
" anamorphic lenses, by maripol, fallen angels by wong kar - wai, style of suspiria and neon demon and"
" children from bahnhof zoo, detailed "
)
snake_case__ : Optional[int] = 4_0_0_3_6_6_0_3_4_6
snake_case__ : Tuple = 7
# without safety guidance (sld_guidance_scale = 0)
snake_case__ : Optional[Any] = torch.manual_seed(__A )
snake_case__ : Optional[int] = sd_pipe(
[prompt] , generator=__A , guidance_scale=__A , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
snake_case__ : Dict = output.images
snake_case__ : List[Any] = image[0, -3:, -3:, -1]
snake_case__ : Tuple = [0.2_2_7_8, 0.2_2_3_1, 0.2_2_4_9, 0.2_3_3_3, 0.2_3_0_3, 0.1_8_8_5, 0.2_2_7_3, 0.2_1_4_4, 0.2_1_7_6]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
# without safety guidance (strong configuration)
snake_case__ : Any = torch.manual_seed(__A )
snake_case__ : Dict = sd_pipe(
[prompt] , generator=__A , guidance_scale=__A , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
snake_case__ : List[str] = output.images
snake_case__ : Any = image[0, -3:, -3:, -1]
snake_case__ : str = [0.2_3_8_3, 0.2_2_7_6, 0.2_3_6, 0.2_1_9_2, 0.2_1_8_6, 0.2_0_5_3, 0.1_9_7_1, 0.1_9_0_1, 0.1_7_1_9]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Tuple ):
snake_case__ : Any = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" , safety_checker=__A )
snake_case__ : Any = LMSDiscreteScheduler.from_config(sd_pipe.scheduler.config )
snake_case__ : List[str] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Optional[Any] = "padme amidala taking a bath artwork, safe for work, no nudity"
snake_case__ : Any = 2_7_3_4_9_7_1_7_5_5
snake_case__ : List[Any] = 7
snake_case__ : int = torch.manual_seed(__A )
snake_case__ : Any = sd_pipe(
[prompt] , generator=__A , guidance_scale=__A , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
snake_case__ : List[Any] = output.images
snake_case__ : Optional[int] = image[0, -3:, -3:, -1]
snake_case__ : Optional[Any] = [0.3_5_0_2, 0.3_6_2_2, 0.3_3_9_6, 0.3_6_4_2, 0.3_4_7_8, 0.3_3_1_8, 0.3_5, 0.3_3_4_8, 0.3_2_9_7]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
snake_case__ : Dict = torch.manual_seed(__A )
snake_case__ : str = sd_pipe(
[prompt] , generator=__A , guidance_scale=__A , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
snake_case__ : Optional[int] = output.images
snake_case__ : Dict = image[0, -3:, -3:, -1]
snake_case__ : Any = [0.5_5_3_1, 0.5_2_0_6, 0.4_8_9_5, 0.5_1_5_6, 0.5_1_8_2, 0.4_7_5_1, 0.4_8_0_2, 0.4_8_0_3, 0.4_4_4_3]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : int ):
snake_case__ : str = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5" )
snake_case__ : Any = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
snake_case__ : Union[str, Any] = (
"the four horsewomen of the apocalypse, painting by tom of finland, gaston bussiere, craig mullins, j. c."
" leyendecker"
)
snake_case__ : Any = 1_0_4_4_3_5_5_2_3_4
snake_case__ : List[str] = 1_2
snake_case__ : Dict = torch.manual_seed(__A )
snake_case__ : List[str] = sd_pipe(
[prompt] , generator=__A , guidance_scale=__A , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=0 , )
snake_case__ : List[Any] = output.images
snake_case__ : Any = image[0, -3:, -3:, -1]
snake_case__ : str = np.array([0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-7
snake_case__ : Tuple = torch.manual_seed(__A )
snake_case__ : Any = sd_pipe(
[prompt] , generator=__A , guidance_scale=__A , num_inference_steps=5_0 , output_type="np" , width=5_1_2 , height=5_1_2 , sld_guidance_scale=2_0_0_0 , sld_warmup_steps=7 , sld_threshold=0.0_2_5 , sld_momentum_scale=0.5 , sld_mom_beta=0.7 , )
snake_case__ : Optional[Any] = output.images
snake_case__ : Union[str, Any] = image[0, -3:, -3:, -1]
snake_case__ : Any = np.array([0.5_8_1_8, 0.6_2_8_5, 0.6_8_3_5, 0.6_0_1_9, 0.6_2_5, 0.6_7_5_4, 0.6_0_9_6, 0.6_3_3_4, 0.6_5_6_1] )
assert image.shape == (1, 5_1_2, 5_1_2, 3)
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
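# Reference sketch (added): the "strong" Safe Latent Diffusion configuration exercised
# repeatedly in the nightly tests above, collected into a single kwargs dict.
STRONG_SLD_KWARGS = {
"sld_guidance_scale": 2_0_0_0,
"sld_warmup_steps": 7,
"sld_threshold": 0.0_2_5,
"sld_momentum_scale": 0.5,
"sld_mom_beta": 0.7,
}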
| 25 |
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
__lowerCamelCase : Union[str, Any] = logging.get_logger(__name__)
__lowerCamelCase : Dict = {
"""Salesforce/instruct-blip-flan-t5""": """https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json""",
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip_vision_model"
def __init__( self : List[Any] , __A : Dict=1_4_0_8 , __A : Tuple=6_1_4_4 , __A : str=3_9 , __A : int=1_6 , __A : str=2_2_4 , __A : Any=1_4 , __A : Dict="gelu" , __A : List[Any]=1e-6 , __A : Any=0.0 , __A : List[Any]=1e-1_0 , __A : Union[str, Any]=True , **__A : Tuple , ):
super().__init__(**__A )
snake_case__ : List[str] = hidden_size
snake_case__ : Optional[int] = intermediate_size
snake_case__ : List[str] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : str = patch_size
snake_case__ : int = image_size
snake_case__ : int = initializer_range
snake_case__ : Optional[int] = attention_dropout
snake_case__ : str = layer_norm_eps
snake_case__ : Optional[Any] = hidden_act
snake_case__ : Tuple = qkv_bias
@classmethod
def _lowercase ( cls : List[str] , __A : Union[str, os.PathLike] , **__A : Optional[Any] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : str = cls.get_config_dict(__A , **__A )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : Union[str, Any] = config_dict["vision_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip_qformer"
def __init__( self : Any , __A : Union[str, Any]=3_0_5_2_2 , __A : Union[str, Any]=7_6_8 , __A : Optional[int]=1_2 , __A : Dict=1_2 , __A : Dict=3_0_7_2 , __A : List[str]="gelu" , __A : Union[str, Any]=0.1 , __A : Tuple=0.1 , __A : Any=5_1_2 , __A : Optional[int]=0.0_2 , __A : List[str]=1e-1_2 , __A : Any=0 , __A : Optional[Any]="absolute" , __A : str=2 , __A : Any=1_4_0_8 , **__A : List[str] , ):
super().__init__(pad_token_id=__A , **__A )
snake_case__ : Dict = vocab_size
snake_case__ : Optional[int] = hidden_size
snake_case__ : Optional[Any] = num_hidden_layers
snake_case__ : str = num_attention_heads
snake_case__ : int = hidden_act
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Union[str, Any] = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : List[Any] = max_position_embeddings
snake_case__ : int = initializer_range
snake_case__ : Dict = layer_norm_eps
snake_case__ : str = position_embedding_type
snake_case__ : Dict = cross_attention_frequency
snake_case__ : List[str] = encoder_hidden_size
@classmethod
def _lowercase ( cls : List[Any] , __A : Union[str, os.PathLike] , **__A : Optional[int] ):
cls._set_token_in_kwargs(__A )
snake_case__, snake_case__ : Tuple = cls.get_config_dict(__A , **__A )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("model_type" ) == "instructblip":
snake_case__ : List[Any] = config_dict["qformer_config"]
if "model_type" in config_dict and hasattr(cls , "model_type" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'''You are using a model of type {config_dict['model_type']} to instantiate a model of type '''
f'''{cls.model_type}. This is not supported for all configurations of models and can yield errors.''' )
return cls.from_dict(__A , **__A )
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "instructblip"
a_ = True
def __init__( self : List[str] , __A : Optional[Any]=None , __A : Tuple=None , __A : Optional[int]=None , __A : Optional[Any]=3_2 , **__A : Optional[int] ):
super().__init__(**__A )
if vision_config is None:
snake_case__ : Any = {}
logger.info("vision_config is None. initializing the InstructBlipVisionConfig with default values." )
if qformer_config is None:
snake_case__ : Optional[Any] = {}
logger.info("qformer_config is None. Initializing the InstructBlipQFormerConfig with default values." )
if text_config is None:
snake_case__ : Optional[int] = {}
logger.info("text_config is None. Initializing the text config with default values (`OPTConfig`)." )
snake_case__ : List[Any] = InstructBlipVisionConfig(**__A )
snake_case__ : Union[str, Any] = InstructBlipQFormerConfig(**__A )
snake_case__ : Dict = text_config["model_type"] if "model_type" in text_config else "opt"
snake_case__ : List[Any] = CONFIG_MAPPING[text_model_type](**__A )
snake_case__ : Union[str, Any] = self.text_config.tie_word_embeddings
snake_case__ : Tuple = self.text_config.is_encoder_decoder
snake_case__ : str = num_query_tokens
snake_case__ : Dict = self.vision_config.hidden_size
snake_case__ : List[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
snake_case__ : int = 1.0
snake_case__ : Optional[int] = 0.0_2
@classmethod
def _lowercase ( cls : List[str] , __A : InstructBlipVisionConfig , __A : InstructBlipQFormerConfig , __A : PretrainedConfig , **__A : int , ):
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__A , )
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = copy.deepcopy(self.__dict__ )
snake_case__ : Optional[Any] = self.vision_config.to_dict()
snake_case__ : List[str] = self.qformer_config.to_dict()
snake_case__ : List[Any] = self.text_config.to_dict()
snake_case__ : List[Any] = self.__class__.model_type
return output
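# Hedged usage sketch (added; the public `transformers` names, including
# `from_vision_qformer_text_configs`, are assumed since the obfuscated names above shadow them):
# vision_config = InstructBlipVisionConfig()
# qformer_config = InstructBlipQFormerConfig()
# text_config = CONFIG_MAPPING["opt"]()
# config = InstructBlipConfig.from_vision_qformer_text_configs(vision_config, qformer_config, text_config)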
| 25 | 1 |
from pathlib import Path
import fire
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : str , snake_case_ : int ):
snake_case__ : Tuple = Path(snake_case_ )
snake_case__ : Optional[Any] = Path(snake_case_ )
dest_dir.mkdir(exist_ok=snake_case_ )
for path in src_dir.iterdir():
snake_case__ : Tuple = [x.rstrip() for x in list(path.open().readlines() )][:n]
snake_case__ : Optional[int] = dest_dir.joinpath(path.name )
print(snake_case_ )
dest_path.open("w" ).write("\n".join(snake_case_ ) )
if __name__ == "__main__":
fire.Fire(minify)
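# Example invocation (added; the paths and line count are illustrative): keep the first
# 100 lines of every file in tests/fixtures/full and write the copies to tests/fixtures/mini.
# python minify.py tests/fixtures/full tests/fixtures/mini 100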
| 25 |
def SCREAMING_SNAKE_CASE ( snake_case_ : list ):
if len(snake_case_ ) <= 1:
return lst
snake_case__ : List[Any] = 1
while i < len(snake_case_ ):
if lst[i - 1] <= lst[i]:
i += 1
else:
snake_case__, snake_case__ : Tuple = lst[i], lst[i - 1]
i -= 1
if i == 0:
snake_case__ : Union[str, Any] = 1
return lst
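# Worked example (added): gnome_sort([3, 1, 2])
# i=1: 3 > 1 -> swap -> [1, 3, 2], i steps back to 0 and resets to 1
# i=1: 1 <= 3 -> i=2; i=2: 3 > 2 -> swap -> [1, 2, 3], i=1
# i=1: 1 <= 2 -> i=2; i=2: 2 <= 3 -> i=3 == len -> done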
if __name__ == "__main__":
__lowerCamelCase : Dict = input("""Enter numbers separated by a comma:\n""").strip()
__lowerCamelCase : Tuple = [int(item) for item in user_input.split(""",""")]
print(gnome_sort(unsorted))
| 25 | 1 |
from string import ascii_uppercase
__lowerCamelCase : List[Any] = {str(ord(c) - 55): c for c in ascii_uppercase}
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int ):
if isinstance(num , float ):
raise TypeError("int() can't convert non-string with explicit base" )
if num < 0:
raise ValueError("parameter must be positive int" )
if isinstance(base , str ):
raise TypeError("'str' object cannot be interpreted as an integer" )
if isinstance(base , float ):
raise TypeError("'float' object cannot be interpreted as an integer" )
if base in (0, 1):
raise ValueError("base must be >= 2" )
if base > 36:
raise ValueError("base must be <= 36" )
snake_case__ : Union[str, Any] = ""
snake_case__ : List[Any] = 0
snake_case__ : str = 0
while div != 1:
snake_case__, snake_case__ : Optional[Any] = divmod(snake_case_ , snake_case_ )
if base >= 11 and 9 < mod < 36:
snake_case__ : Union[str, Any] = ALPHABET_VALUES[str(snake_case_ )]
else:
snake_case__ : Optional[Any] = str(snake_case_ )
new_value += actual_value
snake_case__ : Any = num // base
snake_case__ : Any = div
if div == 0:
return str(new_value[::-1] )
elif div == 1:
new_value += str(div )
return str(new_value[::-1] )
return new_value[::-1]
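# Worked example (added, following the intended control flow): decimal_to_any(255, 16)
# divmod(255, 16) = (15, 15) -> 'F'; 255 // 16 = 15
# divmod(15, 16) = (0, 15) -> 'F'; 15 // 16 = 0 -> return reversed "FF" == "FF"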
if __name__ == "__main__":
import doctest
doctest.testmod()
for base in range(2, 37):
for num in range(1000):
assert int(decimal_to_any(num, base), base) == num, (
num,
base,
decimal_to_any(num, base),
int(decimal_to_any(num, base), base),
)
| 25 |
from __future__ import annotations
import time
__lowerCamelCase : str = list[tuple[int, int]]
__lowerCamelCase : Optional[int] = [
[0, 0, 0, 0, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0], # 0 are free path whereas 1's are obstacles
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0],
[1, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0],
]
__lowerCamelCase : Tuple = [[-1, 0], [0, -1], [1, 0], [0, 1]] # up, left, down, right
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Union[str, Any] , __A : int , __A : int , __A : int , __A : int , __A : Node | None ):
snake_case__ : Optional[int] = pos_x
snake_case__ : Dict = pos_y
snake_case__ : int = (pos_y, pos_x)
snake_case__ : Optional[int] = goal_x
snake_case__ : Tuple = goal_y
snake_case__ : str = parent
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[Any] , __A : tuple[int, int] , __A : tuple[int, int] ):
snake_case__ : Tuple = Node(start[1] , start[0] , goal[1] , goal[0] , __A )
snake_case__ : Tuple = Node(goal[1] , goal[0] , goal[1] , goal[0] , __A )
snake_case__ : int = [self.start]
snake_case__ : Union[str, Any] = False
def _lowercase ( self : Dict ):
while self.node_queue:
snake_case__ : Optional[Any] = self.node_queue.pop(0 )
if current_node.pos == self.target.pos:
snake_case__ : Optional[Any] = True
return self.retrace_path(__A )
snake_case__ : int = self.get_successors(__A )
for node in successors:
self.node_queue.append(__A )
if not self.reached:
return [self.start.pos]
return None
def _lowercase ( self : Union[str, Any] , __A : Node ):
snake_case__ : str = []
for action in delta:
snake_case__ : str = parent.pos_x + action[1]
snake_case__ : Union[str, Any] = parent.pos_y + action[0]
if not (0 <= pos_x <= len(grid[0] ) - 1 and 0 <= pos_y <= len(grid ) - 1):
continue
if grid[pos_y][pos_x] != 0:
continue
successors.append(
Node(__A , __A , self.target.pos_y , self.target.pos_x , __A ) )
return successors
def _lowercase ( self : Optional[Any] , __A : Node | None ):
snake_case__ : Tuple = node
snake_case__ : Any = []
while current_node is not None:
path.append((current_node.pos_y, current_node.pos_x) )
snake_case__ : Tuple = current_node.parent
path.reverse()
return path
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Dict , __A : str , __A : int ):
snake_case__ : str = BreadthFirstSearch(__A , __A )
snake_case__ : int = BreadthFirstSearch(__A , __A )
snake_case__ : Tuple = False
def _lowercase ( self : Optional[Any] ):
while self.fwd_bfs.node_queue or self.bwd_bfs.node_queue:
snake_case__ : Any = self.fwd_bfs.node_queue.pop(0 )
snake_case__ : List[str] = self.bwd_bfs.node_queue.pop(0 )
if current_bwd_node.pos == current_fwd_node.pos:
snake_case__ : List[str] = True
return self.retrace_bidirectional_path(
__A , __A )
snake_case__ : Union[str, Any] = current_bwd_node
snake_case__ : Dict = current_fwd_node
snake_case__ : List[Any] = {
self.fwd_bfs: self.fwd_bfs.get_successors(__A ),
self.bwd_bfs: self.bwd_bfs.get_successors(__A ),
}
for bfs in [self.fwd_bfs, self.bwd_bfs]:
for node in successors[bfs]:
bfs.node_queue.append(__A )
if not self.reached:
return [self.fwd_bfs.start.pos]
return None
def _lowercase ( self : Any , __A : Node , __A : Node ):
snake_case__ : List[str] = self.fwd_bfs.retrace_path(__A )
snake_case__ : Optional[Any] = self.bwd_bfs.retrace_path(__A )
bwd_path.pop()
bwd_path.reverse()
snake_case__ : List[Any] = fwd_path + bwd_path
return path
if __name__ == "__main__":
# all coordinates are given in format [y,x]
import doctest
doctest.testmod()
__lowerCamelCase : str = (0, 0)
__lowerCamelCase : List[str] = (len(grid) - 1, len(grid[0]) - 1)
for elem in grid:
print(elem)
__lowerCamelCase : Any = time.time()
__lowerCamelCase : Optional[Any] = BreadthFirstSearch(init, goal)
__lowerCamelCase : str = bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bfs_time
print("""Unidirectional BFS computation time : """, bfs_time)
__lowerCamelCase : Optional[Any] = time.time()
__lowerCamelCase : Optional[int] = BidirectionalBreadthFirstSearch(init, goal)
__lowerCamelCase : str = bd_bfs.search()
__lowerCamelCase : Optional[Any] = time.time() - start_bd_bfs_time
print("""Bidirectional BFS computation time : """, bd_bfs_time)
| 25 | 1 |
# Usage:
# ./gen-card-facebook-wmt19.py
import os
from pathlib import Path
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[Any] , snake_case_ : Tuple , snake_case_ : Tuple ):
snake_case__ : Tuple = {
"en": "Machine learning is great, isn't it?",
"ru": "Машинное обучение - это здорово, не так ли?",
"de": "Maschinelles Lernen ist großartig, oder?",
}
# BLUE scores as follows:
# "pair": [fairseq, transformers]
snake_case__ : str = {
"ru-en": ["[41.3](http://matrix.statmt.org/matrix/output/1907?run_id=6937)", "39.20"],
"en-ru": ["[36.4](http://matrix.statmt.org/matrix/output/1914?run_id=6724)", "33.47"],
"en-de": ["[43.1](http://matrix.statmt.org/matrix/output/1909?run_id=6862)", "42.83"],
"de-en": ["[42.3](http://matrix.statmt.org/matrix/output/1902?run_id=6750)", "41.35"],
}
snake_case__ : Tuple = F'''{src_lang}-{tgt_lang}'''
snake_case__ : Any = F'''
---
language:
- {src_lang}
- {tgt_lang}
thumbnail:
tags:
- translation
- wmt19
- facebook
license: apache-2.0
datasets:
- wmt19
metrics:
- bleu
---
# FSMT
## Model description
This is a ported version of [fairseq wmt19 transformer](https://github.com/pytorch/fairseq/blob/master/examples/wmt19/README.md) for {src_lang}-{tgt_lang}.
For more details, please see, [Facebook FAIR\'s WMT19 News Translation Task Submission](https://arxiv.org/abs/1907.06616).
The abbreviation FSMT stands for FairSeqMachineTranslation
All four models are available:
* [wmt19-en-ru](https://huggingface.co/facebook/wmt19-en-ru)
* [wmt19-ru-en](https://huggingface.co/facebook/wmt19-ru-en)
* [wmt19-en-de](https://huggingface.co/facebook/wmt19-en-de)
* [wmt19-de-en](https://huggingface.co/facebook/wmt19-de-en)
## Intended uses & limitations
#### How to use
```python
from transformers import FSMTForConditionalGeneration, FSMTTokenizer
mname = "facebook/wmt19-{src_lang}-{tgt_lang}"
tokenizer = FSMTTokenizer.from_pretrained(mname)
model = FSMTForConditionalGeneration.from_pretrained(mname)
input = "{texts[src_lang]}"
input_ids = tokenizer.encode(input, return_tensors="pt")
outputs = model.generate(input_ids)
decoded = tokenizer.decode(outputs[0], skip_special_tokens=True)
print(decoded) # {texts[tgt_lang]}
```
#### Limitations and bias
- The original (and this ported model) doesn\'t seem to handle well inputs with repeated sub-phrases, [content gets truncated](https://discuss.huggingface.co/t/issues-with-translating-inputs-containing-repeated-phrases/981)
## Training data
Pretrained weights were left identical to the original model released by fairseq. For more details, please, see the [paper](https://arxiv.org/abs/1907.06616).
## Eval results
pair | fairseq | transformers
-------|---------|----------
{pair} | {scores[pair][0]} | {scores[pair][1]}
The score is slightly below the score reported by `fairseq`, since `transformers` currently doesn\'t support:
- model ensemble, therefore the best performing checkpoint was ported (``model4.pt``).
- re-ranking
The score was calculated using this code:
```bash
git clone https://github.com/huggingface/transformers
cd transformers
export PAIR={pair}
export DATA_DIR=data/$PAIR
export SAVE_DIR=data/$PAIR
export BS=8
export NUM_BEAMS=15
mkdir -p $DATA_DIR
sacrebleu -t wmt19 -l $PAIR --echo src > $DATA_DIR/val.source
sacrebleu -t wmt19 -l $PAIR --echo ref > $DATA_DIR/val.target
echo $PAIR
PYTHONPATH="src:examples/seq2seq" python examples/seq2seq/run_eval.py facebook/wmt19-$PAIR $DATA_DIR/val.source $SAVE_DIR/test_translations.txt --reference_path $DATA_DIR/val.target --score_path $SAVE_DIR/test_bleu.json --bs $BS --task translation --num_beams $NUM_BEAMS
```
note: fairseq reports using a beam of 50, so you should get a slightly higher score if re-run with `--num_beams 50`.
## Data Sources
- [training, etc.](http://www.statmt.org/wmt19/)
- [test set](http://matrix.statmt.org/test_sets/newstest2019.tgz?1556572561)
### BibTeX entry and citation info
```bibtex
@inproceedings{{...,
year={{2020}},
title={{Facebook FAIR\'s WMT19 News Translation Task Submission}},
author={{Ng, Nathan and Yee, Kyra and Baevski, Alexei and Ott, Myle and Auli, Michael and Edunov, Sergey}},
booktitle={{Proc. of WMT}},
}}
```
## TODO
- port model ensemble (fairseq uses 4 model checkpoints)
'''
os.makedirs(snake_case_ , exist_ok=snake_case_ )
snake_case__ : Any = os.path.join(snake_case_ , "README.md" )
print(F'''Generating {path}''' )
with open(snake_case_ , "w" , encoding="utf-8" ) as f:
f.write(snake_case_ )
# make sure we are under the root of the project
__lowerCamelCase : Optional[int] = Path(__file__).resolve().parent.parent.parent
__lowerCamelCase : List[Any] = repo_dir / """model_cards"""
for model_name in ["wmt19-ru-en", "wmt19-en-ru", "wmt19-en-de", "wmt19-de-en"]:
__lowerCamelCase , __lowerCamelCase , __lowerCamelCase : Any = model_name.split("""-""")
__lowerCamelCase : int = model_cards_dir / """facebook""" / model_name
write_model_card(model_card_dir, src_lang=src_lang, tgt_lang=tgt_lang)
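# Example invocation (added; the script path is illustrative): running it from the
# repository root regenerates all four FSMT model cards under model_cards/facebook/.
# python gen-card-facebook-wmt19.py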
| 25 |
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ConditionalDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : List[Any] , __A : Dict , __A : int=7 , __A : Optional[Any]=3 , __A : List[str]=3_0 , __A : List[Any]=4_0_0 , __A : Union[str, Any]=True , __A : List[Any]=None , __A : Optional[Any]=True , __A : Tuple=[0.5, 0.5, 0.5] , __A : Union[str, Any]=[0.5, 0.5, 0.5] , __A : List[str]=True , __A : Any=1 / 2_5_5 , __A : Optional[int]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Dict = parent
snake_case__ : Optional[int] = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : str = min_resolution
snake_case__ : Tuple = max_resolution
snake_case__ : List[Any] = do_resize
snake_case__ : Dict = size
snake_case__ : List[str] = do_normalize
snake_case__ : Optional[int] = image_mean
snake_case__ : Optional[int] = image_std
snake_case__ : Any = do_rescale
snake_case__ : Optional[int] = rescale_factor
snake_case__ : int = do_pad
def _lowercase ( self : Dict ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[int] , __A : Dict , __A : List[Any]=False ):
if not batched:
snake_case__ : List[str] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : Tuple = image.size
else:
snake_case__, snake_case__ : List[str] = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Dict = int(self.size["shortest_edge"] * h / w )
snake_case__ : Optional[int] = self.size["shortest_edge"]
elif w > h:
snake_case__ : List[Any] = self.size["shortest_edge"]
snake_case__ : Union[str, Any] = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Dict = self.size["shortest_edge"]
snake_case__ : Dict = self.size["shortest_edge"]
else:
snake_case__ : str = []
for image in image_inputs:
snake_case__, snake_case__ : str = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : Dict = max(__A , key=lambda item : item[0] )[0]
snake_case__ : Tuple = max(__A , key=lambda item : item[1] )[1]
return expected_height, expected_width
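# Worked example (added): with size={"shortest_edge": 18}, a 400-tall by 300-wide image
# has w < h, so expected_width = 18 and expected_height = int(18 * 400 / 300) = 24; the
# batched branch then takes the max height and width over all images in the batch.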
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = ConditionalDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : int ):
snake_case__ : Tuple = ConditionalDetrImageProcessingTester(self )
@property
def _lowercase ( self : Any ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Any ):
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : List[str] ):
snake_case__ : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : Union[str, Any] ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Dict = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Dict = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Union[str, Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : int = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Dict = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Optional[Any] = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : str = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Tuple ):
# Initialize image_processing
snake_case__ : Optional[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Optional[int] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Dict = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : List[Any] ):
# prepare image and target
snake_case__ : Union[str, Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Union[str, Any] = json.loads(f.read() )
snake_case__ : Optional[Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : Tuple = ConditionalDetrImageProcessor.from_pretrained("microsoft/conditional-detr-resnet-50" )
snake_case__ : int = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : str = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Tuple = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Optional[int] = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Tuple = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : List[Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[int] = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : Dict = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[str] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : str ):
# prepare image, target and masks_path
snake_case__ : Optional[Any] = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : int = json.loads(f.read() )
snake_case__ : Optional[int] = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : Optional[Any] = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : Optional[int] = ConditionalDetrImageProcessor(format="coco_panoptic" )
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : List[str] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Dict = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : int = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : str = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : Optional[Any] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : str = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : List[Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 1 |
from ...configuration_utils import PretrainedConfig
__lowerCamelCase : str = {
"""google/tapas-base-finetuned-sqa""": (
"""https://huggingface.co/google/tapas-base-finetuned-sqa/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wtq""": (
"""https://huggingface.co/google/tapas-base-finetuned-wtq/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-wikisql-supervised""": (
"""https://huggingface.co/google/tapas-base-finetuned-wikisql-supervised/resolve/main/config.json"""
),
"""google/tapas-base-finetuned-tabfact""": (
"""https://huggingface.co/google/tapas-base-finetuned-tabfact/resolve/main/config.json"""
),
}
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = "tapas"
def __init__( self : Any , __A : str=3_0_5_2_2 , __A : Optional[Any]=7_6_8 , __A : Optional[Any]=1_2 , __A : Tuple=1_2 , __A : Optional[Any]=3_0_7_2 , __A : Optional[Any]="gelu" , __A : Optional[int]=0.1 , __A : Dict=0.1 , __A : Optional[Any]=1_0_2_4 , __A : Optional[Any]=[3, 2_5_6, 2_5_6, 2, 2_5_6, 2_5_6, 1_0] , __A : List[Any]=0.0_2 , __A : Union[str, Any]=1e-1_2 , __A : List[Any]=0 , __A : Dict=1_0.0 , __A : Optional[int]=0 , __A : Dict=1.0 , __A : int=None , __A : Dict=1.0 , __A : Dict=False , __A : List[str]=None , __A : List[Any]=1.0 , __A : Dict=1.0 , __A : str=False , __A : Optional[Any]=False , __A : List[str]="ratio" , __A : List[Any]=None , __A : str=None , __A : Dict=6_4 , __A : int=3_2 , __A : int=False , __A : Dict=True , __A : List[str]=False , __A : Dict=False , __A : Dict=True , __A : Dict=False , __A : str=None , __A : int=None , **__A : int , ):
super().__init__(pad_token_id=__A , **__A )
# BERT hyperparameters (with updated max_position_embeddings and type_vocab_sizes)
snake_case__ : Optional[int] = vocab_size
snake_case__ : int = hidden_size
snake_case__ : Dict = num_hidden_layers
snake_case__ : Tuple = num_attention_heads
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = intermediate_size
snake_case__ : Tuple = hidden_dropout_prob
snake_case__ : Dict = attention_probs_dropout_prob
snake_case__ : Tuple = max_position_embeddings
snake_case__ : Optional[int] = type_vocab_sizes
snake_case__ : Optional[Any] = initializer_range
snake_case__ : Tuple = layer_norm_eps
# Fine-tuning task hyperparameters
snake_case__ : int = positive_label_weight
snake_case__ : Any = num_aggregation_labels
snake_case__ : Optional[int] = aggregation_loss_weight
snake_case__ : Optional[int] = use_answer_as_supervision
snake_case__ : int = answer_loss_importance
snake_case__ : Optional[int] = use_normalized_answer_loss
snake_case__ : Optional[Any] = huber_loss_delta
snake_case__ : int = temperature
snake_case__ : Any = aggregation_temperature
snake_case__ : List[str] = use_gumbel_for_cells
snake_case__ : Optional[int] = use_gumbel_for_aggregation
snake_case__ : int = average_approximation_function
snake_case__ : Optional[int] = cell_selection_preference
snake_case__ : Any = answer_loss_cutoff
snake_case__ : int = max_num_rows
snake_case__ : List[Any] = max_num_columns
snake_case__ : int = average_logits_per_cell
snake_case__ : Union[str, Any] = select_one_column
snake_case__ : str = allow_empty_column_selection
snake_case__ : List[str] = init_cell_selection_weights_to_zero
snake_case__ : Optional[Any] = reset_position_index_per_cell
snake_case__ : Optional[int] = disable_per_token_loss
# Aggregation hyperparameters
snake_case__ : Tuple = aggregation_labels
snake_case__ : Any = no_aggregation_label_index
if isinstance(self.aggregation_labels , __A ):
snake_case__ : str = {int(__A ): v for k, v in aggregation_labels.items()}
| 25 |
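# Hedged aside: the `{int(k): v for k, v in aggregation_labels.items()}` step above
# exists because JSON turns integer dict keys into strings; a minimal sketch of the
# round trip (the label names here are made up for illustration):
import json

aggregation_labels = {0: "NONE", 1: "SUM", 2: "AVERAGE", 3: "COUNT"}
round_tripped = json.loads(json.dumps(aggregation_labels))
assert list(round_tripped) == ["0", "1", "2", "3"]   # keys became strings
assert {int(k): v for k, v in round_tripped.items()} == aggregation_labels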
import faiss # noqa: F401 # Here to have a nice missing dependency error message early on
import numpy # noqa: F401 # Here to have a nice missing dependency error message early on
import requests # noqa: F401 # Here to have a nice missing dependency error message early on
import sklearn # noqa: F401 # Here to have a nice missing dependency error message early on
import tqdm # noqa: F401 # Here to have a nice missing dependency error message early on
from mauve import compute_mauve # From: mauve-text
import datasets
__lowerCamelCase : Optional[int] = """\
@inproceedings{pillutla-etal:mauve:neurips2021,
title={MAUVE: Measuring the Gap Between Neural Text and Human Text using Divergence Frontiers},
author={Pillutla, Krishna and Swayamdipta, Swabha and Zellers, Rowan and Thickstun, John and Welleck, Sean and Choi, Yejin and Harchaoui, Zaid},
booktitle = {NeurIPS},
year = {2021}
}
"""
__lowerCamelCase : str = """\
MAUVE is a library built on PyTorch and HuggingFace Transformers to measure the gap between neural text and human text with the eponymous MAUVE measure.
MAUVE summarizes both Type I and Type II errors measured softly using Kullback–Leibler (KL) divergences.
For details, see the MAUVE paper: https://arxiv.org/abs/2102.01454 (Neurips, 2021).
This metric is a wrapper around the official implementation of MAUVE:
https://github.com/krishnap25/mauve
"""
__lowerCamelCase : str = """
Calculates MAUVE scores between two lists of generated text and reference text.
Args:
predictions: list of generated text to score. Each predictions
should be a string with tokens separated by spaces.
references: list of reference for each prediction. Each
reference should be a string with tokens separated by spaces.
Optional Args:
num_buckets: the size of the histogram to quantize P and Q. Options: 'auto' (default) or an integer
    pca_max_data: the number of data points to use for PCA dimensionality reduction prior to clustering. If -1, use all the data. Default -1
kmeans_explained_var: amount of variance of the data to keep in dimensionality reduction by PCA. Default 0.9
kmeans_num_redo: number of times to redo k-means clustering (the best objective is kept). Default 5
kmeans_max_iter: maximum number of k-means iterations. Default 500
featurize_model_name: name of the model from which features are obtained. Default 'gpt2-large' Use one of ['gpt2', 'gpt2-medium', 'gpt2-large', 'gpt2-xl'].
device_id: Device for featurization. Supply a GPU id (e.g. 0 or 3) to use GPU. If no GPU with this id is found, use CPU
max_text_length: maximum number of tokens to consider. Default 1024
divergence_curve_discretization_size: Number of points to consider on the divergence curve. Default 25
mauve_scaling_factor: \"c\" from the paper. Default 5.
verbose: If True (default), print running time updates
seed: random seed to initialize k-means cluster assignments.
Returns:
mauve: MAUVE score, a number between 0 and 1. Larger values indicate that P and Q are closer,
frontier_integral: Frontier Integral, a number between 0 and 1. Smaller values indicate that P and Q are closer,
divergence_curve: a numpy.ndarray of shape (m, 2); plot it with matplotlib to view the divergence curve,
p_hist: a discrete distribution, which is a quantized version of the text distribution p_text,
q_hist: same as above, but with q_text.
Examples:
>>> # faiss segfaults in doctest for some reason, so the .compute call is not tested with doctest
>>> import datasets
>>> mauve = datasets.load_metric('mauve')
>>> predictions = [\"hello there\", \"general kenobi\"]
>>> references = [\"hello there\", \"general kenobi\"]
>>> out = mauve.compute(predictions=predictions, references=references) # doctest: +SKIP
>>> print(out.mauve) # doctest: +SKIP
1.0
"""
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class SCREAMING_SNAKE_CASE__ ( datasets.Metric ):
"""simple docstring"""
def _lowercase ( self : Dict ):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , homepage="https://github.com/krishnap25/mauve" , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"predictions": datasets.Value("string" , id="sequence" ),
"references": datasets.Value("string" , id="sequence" ),
} ) , codebase_urls=["https://github.com/krishnap25/mauve"] , reference_urls=[
"https://arxiv.org/abs/2102.01454",
"https://github.com/krishnap25/mauve",
] , )
def _lowercase ( self : Union[str, Any] , __A : Dict , __A : List[str] , __A : int=None , __A : List[Any]=None , __A : Optional[int]=None , __A : List[Any]=None , __A : Union[str, Any]="auto" , __A : Optional[Any]=-1 , __A : Optional[Any]=0.9 , __A : Any=5 , __A : List[Any]=5_0_0 , __A : Tuple="gpt2-large" , __A : Optional[Any]=-1 , __A : str=1_0_2_4 , __A : Tuple=2_5 , __A : str=5 , __A : Optional[int]=True , __A : Any=2_5 , ):
snake_case__ : List[Any] = compute_mauve(
p_text=__A , q_text=__A , p_features=__A , q_features=__A , p_tokens=__A , q_tokens=__A , num_buckets=__A , pca_max_data=__A , kmeans_explained_var=__A , kmeans_num_redo=__A , kmeans_max_iter=__A , featurize_model_name=__A , device_id=__A , max_text_length=__A , divergence_curve_discretization_size=__A , mauve_scaling_factor=__A , verbose=__A , seed=__A , )
return out
| 25 | 1 |
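# Hedged aside: MAUVE quantizes P and Q into histograms (the p_hist / q_hist above)
# and scores them with KL-divergence frontiers. A minimal sketch of the discrete KL
# building block, with made-up stand-in histograms:
import numpy as np

def kl_divergence(p: np.ndarray, q: np.ndarray, eps: float = 1e-12) -> float:
    # KL(P || Q) = sum_i p_i * log(p_i / q_i); eps avoids log(0)
    p = p / p.sum()
    q = q / q.sum()
    return float(np.sum(p * np.log((p + eps) / (q + eps))))

p_hist = np.array([0.5, 0.3, 0.2])
q_hist = np.array([0.4, 0.4, 0.2])
assert kl_divergence(p_hist, p_hist) < 1e-9   # zero iff P == Q
assert kl_divergence(p_hist, q_hist) > 0.0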
import gc
import unittest
import numpy as np
import torch
from diffusers import StableDiffusionKDiffusionPipeline
from diffusers.utils import slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
@slow
@require_torch_gpu
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : str ):
# clean up the VRAM after each test
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _lowercase ( self : Optional[int] ):
snake_case__ : Any = StableDiffusionKDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4" )
snake_case__ : int = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("sample_euler" )
snake_case__ : Optional[int] = "A painting of a squirrel eating a burger"
snake_case__ : int = torch.manual_seed(0 )
snake_case__ : Dict = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=2_0 , output_type="np" )
snake_case__ : str = output.images
snake_case__ : str = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Union[str, Any] = np.array([0.0_4_4_7, 0.0_4_9_2, 0.0_4_6_8, 0.0_4_0_8, 0.0_3_8_3, 0.0_4_0_8, 0.0_3_5_4, 0.0_3_8_0, 0.0_3_3_9] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _lowercase ( self : Any ):
snake_case__ : Tuple = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
snake_case__ : str = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("sample_euler" )
snake_case__ : Dict = "A painting of a squirrel eating a burger"
snake_case__ : Tuple = torch.manual_seed(0 )
snake_case__ : int = sd_pipe([prompt] , generator=__A , guidance_scale=9.0 , num_inference_steps=2_0 , output_type="np" )
snake_case__ : Any = output.images
snake_case__ : Dict = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Any = np.array([0.1_2_3_7, 0.1_3_2_0, 0.1_4_3_8, 0.1_3_5_9, 0.1_3_9_0, 0.1_1_3_2, 0.1_2_7_7, 0.1_1_7_5, 0.1_1_1_2] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 5e-1
def _lowercase ( self : Dict ):
snake_case__ : Optional[int] = StableDiffusionKDiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-2-1-base" )
snake_case__ : List[Any] = sd_pipe.to(__A )
sd_pipe.set_progress_bar_config(disable=__A )
sd_pipe.set_scheduler("sample_dpmpp_2m" )
snake_case__ : int = "A painting of a squirrel eating a burger"
snake_case__ : str = torch.manual_seed(0 )
snake_case__ : Optional[Any] = sd_pipe(
[prompt] , generator=__A , guidance_scale=7.5 , num_inference_steps=1_5 , output_type="np" , use_karras_sigmas=__A , )
snake_case__ : Dict = output.images
snake_case__ : List[str] = image[0, -3:, -3:, -1]
assert image.shape == (1, 5_1_2, 5_1_2, 3)
snake_case__ : Any = np.array(
[0.1_1_3_8_1_6_8_9, 0.1_2_1_1_2_9_2_1, 0.1_3_8_9_4_5_7, 0.1_2_5_4_9_6_0_6, 0.1_2_4_4_9_6_4, 0.1_0_8_3_1_5_1_7, 0.1_1_5_6_2_8_6_6, 0.1_0_8_6_7_8_1_6, 0.1_0_4_9_9_0_4_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
| 25 |
# Lint as: python3
# pylint: enable=line-too-long
# pylint: disable=g-import-not-at-top,g-bad-import-order,wrong-import-position
__lowerCamelCase : Union[str, Any] = """2.13.1"""
import platform
import pyarrow
from packaging import version
if version.parse(platform.python_version()) < version.parse("""3.7"""):
raise ImportWarning(
"""To use `datasets`, Python>=3.7 is required, and the current version of Python doesn't match this condition."""
)
if version.parse(pyarrow.__version__).major < 8:
raise ImportWarning(
"""To use `datasets`, the module `pyarrow>=8.0.0` is required, and the current version of `pyarrow` doesn't match this condition.\n"""
"""If you are running this in a Google Colab, you should probably just restart the runtime to use the right version of `pyarrow`."""
)
del platform
del pyarrow
del version
from .arrow_dataset import Dataset
from .arrow_reader import ReadInstruction
from .builder import ArrowBasedBuilder, BeamBasedBuilder, BuilderConfig, DatasetBuilder, GeneratorBasedBuilder
from .combine import concatenate_datasets, interleave_datasets
from .dataset_dict import DatasetDict, IterableDatasetDict
from .download import *
from .features import *
from .fingerprint import disable_caching, enable_caching, is_caching_enabled, set_caching_enabled
from .info import DatasetInfo, MetricInfo
from .inspect import (
get_dataset_config_info,
get_dataset_config_names,
get_dataset_infos,
get_dataset_split_names,
inspect_dataset,
inspect_metric,
list_datasets,
list_metrics,
)
from .iterable_dataset import IterableDataset
from .load import load_dataset, load_dataset_builder, load_from_disk, load_metric
from .metric import Metric
from .splits import (
NamedSplit,
NamedSplitAll,
Split,
SplitBase,
SplitDict,
SplitGenerator,
SplitInfo,
SubSplitInfo,
percent,
)
from .tasks import *
from .utils import *
from .utils import logging
# deprecated modules
from datasets import arrow_dataset as _arrow_dataset # isort:skip
from datasets import utils as _utils # isort:skip
from datasets.utils import download_manager as _deprecated_download_manager # isort:skip
__lowerCamelCase : List[Any] = concatenate_datasets
__lowerCamelCase : List[str] = DownloadConfig
__lowerCamelCase : Union[str, Any] = DownloadManager
__lowerCamelCase : str = DownloadMode
__lowerCamelCase : Union[str, Any] = DownloadConfig
__lowerCamelCase : List[str] = DownloadMode
__lowerCamelCase : Dict = DownloadManager
del _arrow_dataset, _utils, _deprecated_download_manager
| 25 | 1 |
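# Hedged aside: a minimal sketch of the import-time version gate used above, with a
# hypothetical package name; `packaging.version.parse` is the real comparison API.
from packaging import version

def require_min_version(current: str, minimum: str, name: str = "somepackage") -> None:
    if version.parse(current) < version.parse(minimum):
        raise ImportWarning(f"To use this library, `{name}>={minimum}` is required (found {current}).")

require_min_version("8.0.0", "8.0.0")    # passes silently
# require_min_version("7.4.1", "8.0.0")  # would raise ImportWarning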
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : str = [True] * limit
snake_case__ : str = False
snake_case__ : str = False
snake_case__ : str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
snake_case__ : Optional[Any] = i * 2
while index < limit:
snake_case__ : Union[str, Any] = False
snake_case__ : Any = index + i
snake_case__ : Optional[Any] = [2]
for i in range(3 , snake_case_ , 2 ):
if is_prime[i]:
primes.append(snake_case_ )
return primes
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ):
snake_case__ : Optional[int] = prime_sieve(snake_case_ )
snake_case__ : List[Any] = 0
snake_case__ : List[str] = 0
for i in range(len(snake_case_ ) ):
for j in range(i + length , len(snake_case_ ) ):
snake_case__ : Dict = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
snake_case__ : Tuple = j - i
snake_case__ : str = sol
return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 25 |
from __future__ import annotations
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : str = [True] * limit
snake_case__ : str = False
snake_case__ : str = False
snake_case__ : str = True
for i in range(3 , int(limit**0.5 + 1 ) , 2 ):
snake_case__ : Optional[Any] = i * 2
while index < limit:
snake_case__ : Union[str, Any] = False
snake_case__ : Any = index + i
snake_case__ : Optional[Any] = [2]
for i in range(3 , snake_case_ , 2 ):
if is_prime[i]:
primes.append(snake_case_ )
return primes
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000000 ):
snake_case__ : Optional[int] = prime_sieve(snake_case_ )
snake_case__ : List[Any] = 0
snake_case__ : List[str] = 0
for i in range(len(snake_case_ ) ):
for j in range(i + length , len(snake_case_ ) ):
snake_case__ : Dict = sum(primes[i:j] )
if sol >= ceiling:
break
if sol in primes:
snake_case__ : Tuple = j - i
snake_case__ : str = sol
return largest
if __name__ == "__main__":
print(f"{solution() = }")
| 25 | 1 |
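# Hedged aside: a readable equivalent of the sieve in the two rows above, assuming
# the repeated bare `snake_case__` assignments stand for subscripted writes into the
# boolean table:
def prime_sieve(limit: int) -> list[int]:
    is_prime = [True] * limit
    is_prime[0] = is_prime[1] = False
    for i in range(2, int(limit**0.5) + 1):
        if is_prime[i]:
            for multiple in range(i * i, limit, i):
                is_prime[multiple] = False   # every multiple of a prime is composite
    return [n for n in range(2, limit) if is_prime[n]]

assert prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29]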
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Optional[Any] , __A : List[Any] , __A : Optional[int]=1_3 , __A : Optional[int]=7 , __A : Optional[Any]=True , __A : int=True , __A : Optional[Any]=True , __A : Dict=True , __A : Tuple=9_9 , __A : Optional[Any]=1_6 , __A : Dict=3_6 , __A : Optional[Any]=6 , __A : List[Any]=6 , __A : Tuple=6 , __A : Any=3_7 , __A : Optional[Any]="gelu" , __A : Tuple=0.1 , __A : int=0.1 , __A : List[Any]=5_1_2 , __A : Tuple=1_6 , __A : List[Any]=2 , __A : Any=0.0_2 , __A : int=3 , __A : Any=4 , __A : int=None , ):
snake_case__ : Dict = parent
snake_case__ : Optional[Any] = batch_size
snake_case__ : List[str] = seq_length
snake_case__ : List[str] = is_training
snake_case__ : Optional[int] = use_input_mask
snake_case__ : List[Any] = use_token_type_ids
snake_case__ : List[Any] = use_labels
snake_case__ : Optional[Any] = vocab_size
snake_case__ : Optional[int] = embedding_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Optional[int] = num_hidden_layers
snake_case__ : List[Any] = num_hidden_groups
snake_case__ : Any = num_attention_heads
snake_case__ : int = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Dict = hidden_dropout_prob
snake_case__ : List[Any] = attention_probs_dropout_prob
snake_case__ : Dict = max_position_embeddings
snake_case__ : Tuple = type_vocab_size
snake_case__ : int = type_sequence_label_size
snake_case__ : int = initializer_range
snake_case__ : List[str] = num_labels
snake_case__ : Optional[int] = num_choices
snake_case__ : Tuple = scope
def _lowercase ( self : List[Any] ):
snake_case__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Dict = None
if self.use_input_mask:
snake_case__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
snake_case__ : int = None
if self.use_token_type_ids:
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
snake_case__ : Union[str, Any] = None
snake_case__ : List[Any] = None
snake_case__ : str = None
if self.use_labels:
snake_case__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
snake_case__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
snake_case__ : List[str] = ids_tensor([self.batch_size] , self.num_choices )
snake_case__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _lowercase ( self : Optional[Any] ):
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def _lowercase ( self : Dict , __A : Dict , __A : List[Any] , __A : Tuple , __A : Tuple , __A : Union[str, Any] , __A : Dict , __A : str ):
snake_case__ : List[str] = AlbertModel(config=__A )
model.to(__A )
model.eval()
snake_case__ : Dict = model(__A , attention_mask=__A , token_type_ids=__A )
snake_case__ : List[str] = model(__A , token_type_ids=__A )
snake_case__ : Tuple = model(__A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _lowercase ( self : Any , __A : Dict , __A : Optional[Any] , __A : Optional[Any] , __A : Dict , __A : Union[str, Any] , __A : Optional[int] , __A : Dict ):
snake_case__ : List[Any] = AlbertForPreTraining(config=__A )
model.to(__A )
model.eval()
snake_case__ : str = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , sentence_order_label=__A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def _lowercase ( self : List[Any] , __A : int , __A : str , __A : Optional[Any] , __A : str , __A : List[str] , __A : int , __A : str ):
snake_case__ : Any = AlbertForMaskedLM(config=__A )
model.to(__A )
model.eval()
snake_case__ : Union[str, Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _lowercase ( self : List[str] , __A : int , __A : int , __A : Optional[int] , __A : Optional[int] , __A : Dict , __A : str , __A : int ):
snake_case__ : List[str] = AlbertForQuestionAnswering(config=__A )
model.to(__A )
model.eval()
snake_case__ : List[Any] = model(
__A , attention_mask=__A , token_type_ids=__A , start_positions=__A , end_positions=__A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _lowercase ( self : Optional[Any] , __A : Tuple , __A : Any , __A : Tuple , __A : Optional[int] , __A : Dict , __A : int , __A : Any ):
snake_case__ : Any = self.num_labels
snake_case__ : str = AlbertForSequenceClassification(__A )
model.to(__A )
model.eval()
snake_case__ : Union[str, Any] = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _lowercase ( self : Optional[Any] , __A : Union[str, Any] , __A : Optional[Any] , __A : List[str] , __A : str , __A : Union[str, Any] , __A : int , __A : Union[str, Any] ):
snake_case__ : List[str] = self.num_labels
snake_case__ : Optional[int] = AlbertForTokenClassification(config=__A )
model.to(__A )
model.eval()
snake_case__ : Tuple = model(__A , attention_mask=__A , token_type_ids=__A , labels=__A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _lowercase ( self : str , __A : Optional[int] , __A : Any , __A : Tuple , __A : Union[str, Any] , __A : Optional[Any] , __A : str , __A : int ):
snake_case__ : str = self.num_choices
snake_case__ : Union[str, Any] = AlbertForMultipleChoice(config=__A )
model.to(__A )
model.eval()
snake_case__ : Dict = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Any = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Tuple = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
snake_case__ : Optional[Any] = model(
__A , attention_mask=__A , token_type_ids=__A , labels=__A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
), (
snake_case__
),
) : Union[str, Any] = config_and_inputs
snake_case__ : Dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
a_ = (
{
"feature-extraction": AlbertModel,
"fill-mask": AlbertForMaskedLM,
"question-answering": AlbertForQuestionAnswering,
"text-classification": AlbertForSequenceClassification,
"token-classification": AlbertForTokenClassification,
"zero-shot": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = True
def _lowercase ( self : Union[str, Any] , __A : List[str] , __A : Tuple , __A : Union[str, Any]=False ):
snake_case__ : Optional[Any] = super()._prepare_for_class(__A , __A , return_labels=__A )
if return_labels:
if model_class in get_values(__A ):
snake_case__ : Optional[Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__A )
snake_case__ : Optional[int] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__A )
return inputs_dict
def _lowercase ( self : Union[str, Any] ):
snake_case__ : int = AlbertModelTester(self )
snake_case__ : int = ConfigTester(self , config_class=__A , hidden_size=3_7 )
def _lowercase ( self : str ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _lowercase ( self : List[Any] ):
snake_case__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*__A )
def _lowercase ( self : List[str] ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__A )
def _lowercase ( self : List[Any] ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*__A )
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*__A )
def _lowercase ( self : Dict ):
snake_case__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
snake_case__ : List[str] = type
self.model_tester.create_and_check_model(*__A )
@slow
def _lowercase ( self : str ):
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
snake_case__ : str = AlbertModel.from_pretrained(__A )
self.assertIsNotNone(__A )
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@slow
def _lowercase ( self : Any ):
snake_case__ : Optional[int] = AlbertModel.from_pretrained("albert-base-v2" )
snake_case__ : Union[str, Any] = torch.tensor([[0, 3_4_5, 2_3_2, 3_2_8, 7_4_0, 1_4_0, 1_6_9_5, 6_9, 6_0_7_8, 1_5_8_8, 2]] )
snake_case__ : Tuple = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
snake_case__ : Any = model(__A , attention_mask=__A )[0]
snake_case__ : str = torch.Size((1, 1_1, 7_6_8) )
self.assertEqual(output.shape , __A )
snake_case__ : List[str] = torch.tensor(
[[[-0.6_5_1_3, 1.5_0_3_5, -0.2_7_6_6], [-0.6_5_1_5, 1.5_0_4_6, -0.2_7_8_0], [-0.6_5_1_2, 1.5_0_4_9, -0.2_7_8_4]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , __A , atol=1e-4 ) )
| 25 |
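# Hedged aside: the multiple-choice test above repeats each (batch, seq) tensor once
# per answer choice; a minimal sketch of that unsqueeze/expand reshape:
import torch

batch_size, num_choices, seq_length = 2, 4, 7
input_ids = torch.randint(0, 100, (batch_size, seq_length))
expanded = input_ids.unsqueeze(1).expand(-1, num_choices, -1).contiguous()
assert expanded.shape == (batch_size, num_choices, seq_length)
assert torch.equal(expanded[:, 0], expanded[:, 3])   # every choice sees the same ids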
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def __init__( self : int , __A : List[str] , __A : Union[str, Any]=7 , __A : Any=3 , __A : Optional[Any]=3_0 , __A : List[str]=4_0_0 , __A : str=True , __A : Optional[Any]=None , __A : Optional[int]=True , __A : int=[0.5, 0.5, 0.5] , __A : Dict=[0.5, 0.5, 0.5] , __A : Optional[int]=True , __A : int=1 / 2_5_5 , __A : List[str]=True , ):
# by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
snake_case__ : List[str] = size if size is not None else {"shortest_edge": 1_8, "longest_edge": 1_3_3_3}
snake_case__ : Optional[Any] = parent
snake_case__ : str = batch_size
snake_case__ : Union[str, Any] = num_channels
snake_case__ : Optional[Any] = min_resolution
snake_case__ : List[str] = max_resolution
snake_case__ : Tuple = do_resize
snake_case__ : str = size
snake_case__ : str = do_normalize
snake_case__ : Optional[Any] = image_mean
snake_case__ : List[str] = image_std
snake_case__ : List[str] = do_rescale
snake_case__ : Tuple = rescale_factor
snake_case__ : Tuple = do_pad
def _lowercase ( self : str ):
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _lowercase ( self : Optional[Any] , __A : List[Any] , __A : List[Any]=False ):
if not batched:
snake_case__ : List[Any] = image_inputs[0]
if isinstance(__A , Image.Image ):
snake_case__, snake_case__ : str = image.size
else:
snake_case__, snake_case__ : Dict = image.shape[1], image.shape[2]
if w < h:
snake_case__ : Any = int(self.size["shortest_edge"] * h / w )
snake_case__ : Any = self.size["shortest_edge"]
elif w > h:
snake_case__ : Optional[int] = self.size["shortest_edge"]
snake_case__ : Any = int(self.size["shortest_edge"] * w / h )
else:
snake_case__ : Tuple = self.size["shortest_edge"]
snake_case__ : int = self.size["shortest_edge"]
else:
snake_case__ : Any = []
for image in image_inputs:
snake_case__, snake_case__ : Optional[Any] = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
snake_case__ : List[Any] = max(__A , key=lambda __A : item[0] )[0]
snake_case__ : int = max(__A , key=lambda __A : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = DeformableDetrImageProcessor if is_vision_available() else None
def _lowercase ( self : str ):
snake_case__ : Optional[Any] = DeformableDetrImageProcessingTester(self )
@property
def _lowercase ( self : List[Any] ):
return self.image_processor_tester.prepare_image_processor_dict()
def _lowercase ( self : Tuple ):
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__A , "image_mean" ) )
self.assertTrue(hasattr(__A , "image_std" ) )
self.assertTrue(hasattr(__A , "do_normalize" ) )
self.assertTrue(hasattr(__A , "do_resize" ) )
self.assertTrue(hasattr(__A , "do_rescale" ) )
self.assertTrue(hasattr(__A , "do_pad" ) )
self.assertTrue(hasattr(__A , "size" ) )
def _lowercase ( self : Any ):
snake_case__ : Union[str, Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"shortest_edge": 1_8, "longest_edge": 1_3_3_3} )
self.assertEqual(image_processor.do_pad , __A )
snake_case__ : Tuple = self.image_processing_class.from_dict(
self.image_processor_dict , size=4_2 , max_size=8_4 , pad_and_return_pixel_mask=__A )
self.assertEqual(image_processor.size , {"shortest_edge": 4_2, "longest_edge": 8_4} )
self.assertEqual(image_processor.do_pad , __A )
def _lowercase ( self : str ):
pass
def _lowercase ( self : List[str] ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
snake_case__ : Optional[Any] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A )
for image in image_inputs:
self.assertIsInstance(__A , Image.Image )
# Test not batched input
snake_case__ : Tuple = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : List[str] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__, snake_case__ : List[Any] = self.image_processor_tester.get_expected_values(__A , batched=__A )
snake_case__ : int = image_processing(__A , return_tensors="pt" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : int ):
# Initialize image_processing
snake_case__ : Any = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
snake_case__ : str = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , numpify=__A )
for image in image_inputs:
self.assertIsInstance(__A , np.ndarray )
# Test not batched input
snake_case__ : Optional[Any] = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : int = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def _lowercase ( self : Union[str, Any] ):
# Initialize image_processing
snake_case__ : Optional[int] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
snake_case__ : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=__A , torchify=__A )
for image in image_inputs:
self.assertIsInstance(__A , torch.Tensor )
# Test not batched input
snake_case__ : str = image_processing(image_inputs[0] , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Union[str, Any] = self.image_processor_tester.get_expected_values(__A )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
snake_case__ : Tuple = image_processing(__A , return_tensors="pt" ).pixel_values
snake_case__, snake_case__ : Tuple = self.image_processor_tester.get_expected_values(__A , batched=__A )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def _lowercase ( self : Optional[Any] ):
# prepare image and target
snake_case__ : Tuple = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_annotations.txt" , "r" ) as f:
snake_case__ : Tuple = json.loads(f.read() )
snake_case__ : Union[str, Any] = {"image_id": 3_9_7_6_9, "annotations": target}
# encode them
snake_case__ : str = DeformableDetrImageProcessor()
snake_case__ : Tuple = image_processing(images=__A , annotations=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : Optional[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Union[str, Any] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : str = torch.tensor([5_8_8_7.9_6_0_0, 1_1_2_5_0.2_0_6_1, 4_8_9_3_5_3.8_4_3_8, 8_3_7_1_2_2.7_5_0_0, 1_4_7_9_6_7.5_1_5_6, 1_6_5_7_3_2.3_4_3_8] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Union[str, Any] = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : List[Any] = torch.tensor([0.5_5_0_3, 0.2_7_6_5, 0.0_6_0_4, 0.2_2_1_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : Any = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Tuple = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : int = torch.tensor([7_5, 7_5, 6_3, 6_5, 1_7, 1_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify orig_size
snake_case__ : List[str] = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Tuple = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
@slow
def _lowercase ( self : Optional[int] ):
# prepare image, target and masks_path
snake_case__ : Dict = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png" )
with open("./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt" , "r" ) as f:
snake_case__ : Any = json.loads(f.read() )
snake_case__ : Dict = {"file_name": "000000039769.png", "image_id": 3_9_7_6_9, "segments_info": target}
snake_case__ : int = pathlib.Path("./tests/fixtures/tests_samples/COCO/coco_panoptic" )
# encode them
snake_case__ : List[str] = DeformableDetrImageProcessor(format="coco_panoptic" )
snake_case__ : List[Any] = image_processing(images=__A , annotations=__A , masks_path=__A , return_tensors="pt" )
# verify pixel values
snake_case__ : List[Any] = torch.Size([1, 3, 8_0_0, 1_0_6_6] )
self.assertEqual(encoding["pixel_values"].shape , __A )
snake_case__ : Optional[int] = torch.tensor([0.2_7_9_6, 0.3_1_3_8, 0.3_4_8_1] )
self.assertTrue(torch.allclose(encoding["pixel_values"][0, 0, 0, :3] , __A , atol=1e-4 ) )
# verify area
snake_case__ : Tuple = torch.tensor([1_4_7_9_7_9.6_8_7_5, 1_6_5_5_2_7.0_4_6_9, 4_8_4_6_3_8.5_9_3_8, 1_1_2_9_2.9_3_7_5, 5_8_7_9.6_5_6_2, 7_6_3_4.1_1_4_7] )
self.assertTrue(torch.allclose(encoding["labels"][0]["area"] , __A ) )
# verify boxes
snake_case__ : Any = torch.Size([6, 4] )
self.assertEqual(encoding["labels"][0]["boxes"].shape , __A )
snake_case__ : Any = torch.tensor([0.2_6_2_5, 0.5_4_3_7, 0.4_6_8_8, 0.8_6_2_5] )
self.assertTrue(torch.allclose(encoding["labels"][0]["boxes"][0] , __A , atol=1e-3 ) )
# verify image_id
snake_case__ : List[str] = torch.tensor([3_9_7_6_9] )
self.assertTrue(torch.allclose(encoding["labels"][0]["image_id"] , __A ) )
# verify is_crowd
snake_case__ : Any = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["iscrowd"] , __A ) )
# verify class_labels
snake_case__ : List[str] = torch.tensor([1_7, 1_7, 6_3, 7_5, 7_5, 9_3] )
self.assertTrue(torch.allclose(encoding["labels"][0]["class_labels"] , __A ) )
# verify masks
snake_case__ : Union[str, Any] = 8_2_2_8_7_3
self.assertEqual(encoding["labels"][0]["masks"].sum().item() , __A )
# verify orig_size
snake_case__ : int = torch.tensor([4_8_0, 6_4_0] )
self.assertTrue(torch.allclose(encoding["labels"][0]["orig_size"] , __A ) )
# verify size
snake_case__ : Union[str, Any] = torch.tensor([8_0_0, 1_0_6_6] )
self.assertTrue(torch.allclose(encoding["labels"][0]["size"] , __A ) )
| 25 | 1 |
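# Hedged aside: the box assertions above all land in [0, 1] because DETR-style
# processors convert COCO (x, y, w, h) pixel boxes to center format normalized by
# image size; a minimal sketch of that conversion:
def coco_to_center_format(box, image_width, image_height):
    x, y, w, h = box
    return (
        (x + w / 2) / image_width,    # center x as a fraction of width
        (y + h / 2) / image_height,   # center y as a fraction of height
        w / image_width,
        h / image_height,
    )

normalized = coco_to_center_format((10, 20, 30, 40), image_width=640, image_height=480)
assert all(0.0 <= v <= 1.0 for v in normalized)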
import argparse
import torch
from torch import nn
from transformers import MBartConfig, MBartForConditionalGeneration
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict ):
snake_case__ : Any = [
"encoder.version",
"decoder.version",
"model.encoder.version",
"model.decoder.version",
"_float_tensor",
"decoder.output_projection.weight",
]
for k in ignore_keys:
state_dict.pop(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
snake_case__, snake_case__ : Optional[Any] = emb.weight.shape
snake_case__ : int = nn.Linear(snake_case_ , snake_case_ , bias=snake_case_ )
snake_case__ : List[Any] = emb.weight.data
return lin_layer
def SCREAMING_SNAKE_CASE ( snake_case_ : Optional[int] , snake_case_ : Union[str, Any]="facebook/mbart-large-en-ro" , snake_case_ : List[Any]=False , snake_case_ : List[Any]=False ):
snake_case__ : List[str] = torch.load(snake_case_ , map_location="cpu" )["model"]
remove_ignore_keys_(snake_case_ )
snake_case__ : Optional[Any] = state_dict["encoder.embed_tokens.weight"].shape[0]
snake_case__ : Optional[Any] = MBartConfig.from_pretrained(snake_case_ , vocab_size=snake_case_ )
if mbart_aa and finetuned:
snake_case__ : Any = "relu"
snake_case__ : Optional[Any] = state_dict["decoder.embed_tokens.weight"]
snake_case__ : Dict = MBartForConditionalGeneration(snake_case_ )
model.model.load_state_dict(snake_case_ )
if finetuned:
snake_case__ : str = make_linear_from_emb(model.model.shared )
return model
if __name__ == "__main__":
__lowerCamelCase : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""fairseq_path""", type=str, help="""bart.large, bart.large.cnn or a path to a model.pt on local filesystem."""
)
parser.add_argument("""pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--hf_config""",
default="""facebook/mbart-large-cc25""",
type=str,
help="""Which huggingface architecture to use: mbart-large""",
)
parser.add_argument("""--mbart_50""", action="""store_true""", help="""whether the model is mMART-50 checkpoint""")
parser.add_argument("""--finetuned""", action="""store_true""", help="""whether the model is a fine-tuned checkpoint""")
__lowerCamelCase : Optional[int] = parser.parse_args()
__lowerCamelCase : Union[str, Any] = convert_fairseq_mbart_checkpoint_from_disk(
args.fairseq_path, hf_config_path=args.hf_config, finetuned=args.finetuned, mbart_aa=args.mbart_aa
)
model.save_pretrained(args.pytorch_dump_folder_path)
| 25 |
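# Hedged aside: a minimal sketch of the weight tying behind `make_linear_from_emb`
# above — the LM head is a bias-free Linear that shares the embedding matrix:
import torch
from torch import nn

emb = nn.Embedding(num_embeddings=100, embedding_dim=16)
vocab_size, hidden = emb.weight.shape
lm_head = nn.Linear(hidden, vocab_size, bias=False)
lm_head.weight.data = emb.weight.data   # tied: logits = h @ emb.weight.T
assert lm_head(torch.randn(2, hidden)).shape == (2, vocab_size)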
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
__lowerCamelCase : List[str] = logging.get_logger(__name__)
__lowerCamelCase : Optional[Any] = {"""vocab_file""": """vocab.json""", """merges_file""": """merges.txt"""}
# See all LED models at https://huggingface.co/models?filter=LED
__lowerCamelCase : Tuple = {
"""vocab_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json""",
},
"""merges_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt""",
},
"""tokenizer_file""": {
"""allenai/led-base-16384""": """https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json""",
},
}
__lowerCamelCase : Dict = {
"""allenai/led-base-16384""": 1_6384,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = (
list(range(ord("!" ) , ord("~" ) + 1 ) ) + list(range(ord("¡" ) , ord("¬" ) + 1 ) ) + list(range(ord("®" ) , ord("ÿ" ) + 1 ) )
)
snake_case__ : Optional[int] = bs[:]
snake_case__ : Any = 0
for b in range(2**8 ):
if b not in bs:
bs.append(snake_case_ )
cs.append(2**8 + n )
n += 1
snake_case__ : Dict = [chr(snake_case_ ) for n in cs]
return dict(zip(snake_case_ , snake_case_ ) )
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] ):
snake_case__ : Dict = set()
snake_case__ : Tuple = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
snake_case__ : List[Any] = char
return pairs
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ ):
"""simple docstring"""
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["input_ids", "attention_mask"]
def __init__( self : List[str] , __A : Any , __A : List[str] , __A : Optional[Any]="replace" , __A : Optional[int]="<s>" , __A : Union[str, Any]="</s>" , __A : Tuple="</s>" , __A : List[Any]="<s>" , __A : Dict="<unk>" , __A : Any="<pad>" , __A : Optional[int]="<mask>" , __A : List[str]=False , **__A : Union[str, Any] , ):
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else bos_token
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else eos_token
snake_case__ : Any = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else sep_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else cls_token
snake_case__ : Tuple = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else unk_token
snake_case__ : List[Any] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else pad_token
        # Mask token behaves like a normal word, i.e. it includes the space before it
snake_case__ : List[str] = AddedToken(__A , lstrip=__A , rstrip=__A ) if isinstance(__A , __A ) else mask_token
super().__init__(
errors=__A , bos_token=__A , eos_token=__A , unk_token=__A , sep_token=__A , cls_token=__A , pad_token=__A , mask_token=__A , add_prefix_space=__A , **__A , )
with open(__A , encoding="utf-8" ) as vocab_handle:
snake_case__ : Any = json.load(__A )
snake_case__ : Optional[Any] = {v: k for k, v in self.encoder.items()}
snake_case__ : Union[str, Any] = errors # how to handle errors in decoding
snake_case__ : Any = bytes_to_unicode()
snake_case__ : Optional[Any] = {v: k for k, v in self.byte_encoder.items()}
with open(__A , encoding="utf-8" ) as merges_handle:
snake_case__ : str = merges_handle.read().split("\n" )[1:-1]
snake_case__ : int = [tuple(merge.split() ) for merge in bpe_merges]
snake_case__ : str = dict(zip(__A , range(len(__A ) ) ) )
snake_case__ : Optional[int] = {}
snake_case__ : Any = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
snake_case__ : Union[str, Any] = re.compile(R"'s|'t|'re|'ve|'m|'ll|'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+" )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def _lowercase ( self : List[Any] ):
return len(self.encoder )
def _lowercase ( self : Any ):
return dict(self.encoder , **self.added_tokens_encoder )
def _lowercase ( self : Optional[Any] , __A : Optional[int] ):
if token in self.cache:
return self.cache[token]
snake_case__ : Union[str, Any] = tuple(__A )
snake_case__ : List[Any] = get_pairs(__A )
if not pairs:
return token
while True:
snake_case__ : Tuple = min(__A , key=lambda __A : self.bpe_ranks.get(__A , float("inf" ) ) )
if bigram not in self.bpe_ranks:
break
snake_case__, snake_case__ : Dict = bigram
snake_case__ : str = []
snake_case__ : Union[str, Any] = 0
while i < len(__A ):
try:
snake_case__ : Dict = word.index(__A , __A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
snake_case__ : str = j
if word[i] == first and i < len(__A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
snake_case__ : str = tuple(__A )
snake_case__ : int = new_word
if len(__A ) == 1:
break
else:
snake_case__ : List[str] = get_pairs(__A )
snake_case__ : List[Any] = " ".join(__A )
snake_case__ : Optional[int] = word
return word
def _lowercase ( self : Optional[Any] , __A : Optional[Any] ):
snake_case__ : List[str] = []
for token in re.findall(self.pat , __A ):
snake_case__ : Dict = "".join(
self.byte_encoder[b] for b in token.encode("utf-8" ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(__A ).split(" " ) )
return bpe_tokens
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
return self.encoder.get(__A , self.encoder.get(self.unk_token ) )
def _lowercase ( self : Optional[int] , __A : Optional[Any] ):
return self.decoder.get(__A )
def _lowercase ( self : Union[str, Any] , __A : Dict ):
snake_case__ : Optional[Any] = "".join(__A )
snake_case__ : int = bytearray([self.byte_decoder[c] for c in text] ).decode("utf-8" , errors=self.errors )
return text
def _lowercase ( self : Optional[int] , __A : str , __A : Optional[str] = None ):
if not os.path.isdir(__A ):
logger.error(f'''Vocabulary path ({save_directory}) should be a directory''' )
return
snake_case__ : List[Any] = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"] )
snake_case__ : str = os.path.join(
__A , (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"] )
with open(__A , "w" , encoding="utf-8" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__A , ensure_ascii=__A ) + "\n" )
snake_case__ : str = 0
with open(__A , "w" , encoding="utf-8" ) as writer:
writer.write("#version: 0.2\n" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __A : kv[1] ):
if index != token_index:
logger.warning(
f'''Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'''
" Please check that the tokenizer is not corrupted!" )
snake_case__ : int = token_index
writer.write(" ".join(__A ) + "\n" )
index += 1
return vocab_file, merge_file
def _lowercase ( self : int , __A : List[int] , __A : Optional[List[int]] = None ):
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
snake_case__ : Tuple = [self.cls_token_id]
snake_case__ : List[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _lowercase ( self : Optional[Any] , __A : List[int] , __A : Optional[List[int]] = None , __A : bool = False ):
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__A , token_ids_a=__A , already_has_special_tokens=__A )
if token_ids_a is None:
return [1] + ([0] * len(__A )) + [1]
return [1] + ([0] * len(__A )) + [1, 1] + ([0] * len(__A )) + [1]
def _lowercase ( self : List[Any] , __A : List[int] , __A : Optional[List[int]] = None ):
snake_case__ : Any = [self.sep_token_id]
snake_case__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _lowercase ( self : Optional[Any] , __A : int , __A : int=False , **__A : Dict ):
snake_case__ : Optional[int] = kwargs.pop("add_prefix_space" , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(__A ) > 0 and not text[0].isspace()):
snake_case__ : Optional[int] = " " + text
return (text, kwargs)
def _lowercase ( self : Any , __A : Union[Dict[str, EncodedInput], BatchEncoding] , __A : Optional[int] = None , __A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , __A : Optional[int] = None , __A : Optional[bool] = None , ):
snake_case__ : Optional[Any] = super()._pad(
encoded_inputs=__A , max_length=__A , padding_strategy=__A , pad_to_multiple_of=__A , return_attention_mask=__A , )
# Load from model defaults
if return_attention_mask is None:
snake_case__ : Union[str, Any] = "attention_mask" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
snake_case__ : Union[str, Any] = encoded_inputs[self.model_input_names[0]]
            # `global_attention_mask` needs to have the same length as other (sequential) inputs.
snake_case__ : Tuple = len(encoded_inputs["global_attention_mask"] ) != len(__A )
if needs_to_be_padded:
snake_case__ : int = len(__A ) - len(encoded_inputs["global_attention_mask"] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
snake_case__ : int = (
encoded_inputs["global_attention_mask"] + [-1] * difference
)
elif self.padding_side == "left":
snake_case__ : Tuple = [-1] * difference + encoded_inputs[
"global_attention_mask"
]
else:
raise ValueError("Invalid padding strategy:" + str(self.padding_side ) )
return encoded_inputs
| 25 | 1 |
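# Hedged aside: the `_pad` override above pads `global_attention_mask` with -1
# rather than 0, since 0 already means "local attention" for LED; a minimal sketch:
def pad_global_attention_mask(mask, target_length, padding_side="right"):
    difference = target_length - len(mask)
    if padding_side == "right":
        return mask + [-1] * difference
    return [-1] * difference + mask

assert pad_global_attention_mask([1, 0, 0], 5) == [1, 0, 0, -1, -1]
assert pad_global_attention_mask([1, 0, 0], 5, padding_side="left") == [-1, -1, 1, 0, 0]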
import argparse
import re
import torch
from CLAP import create_model
from transformers import AutoFeatureExtractor, ClapConfig, ClapModel
__lowerCamelCase : str = {
"""text_branch""": """text_model""",
"""audio_branch""": """audio_model.audio_encoder""",
"""attn""": """attention.self""",
"""self.proj""": """output.dense""",
"""attention.self_mask""": """attn_mask""",
"""mlp.fc1""": """intermediate.dense""",
"""mlp.fc2""": """output.dense""",
"""norm1""": """layernorm_before""",
"""norm2""": """layernorm_after""",
"""bn0""": """batch_norm""",
}
__lowerCamelCase : Optional[int] = AutoFeatureExtractor.from_pretrained("""laion/clap-htsat-unfused""", truncation="""rand_trunc""")
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Tuple=False ):
snake_case__, snake_case__ : Any = create_model(
"HTSAT-tiny" , "roberta" , snake_case_ , precision="fp32" , device="cuda:0" if torch.cuda.is_available() else "cpu" , enable_fusion=snake_case_ , fusion_type="aff_2d" if enable_fusion else None , )
return model, model_cfg
def SCREAMING_SNAKE_CASE ( snake_case_ : Dict ):
snake_case__ : Any = {}
snake_case__ : Optional[Any] = R".*sequential.(\d+).*"
snake_case__ : Optional[Any] = R".*_projection.(\d+).*"
for key, value in state_dict.items():
# check if any key needs to be modified
for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
if key_to_modify in key:
snake_case__ : Dict = key.replace(snake_case_ , snake_case_ )
if re.match(snake_case_ , snake_case_ ):
# replace sequential layers with list
snake_case__ : Tuple = re.match(snake_case_ , snake_case_ ).group(1 )
snake_case__ : Union[str, Any] = key.replace(F'''sequential.{sequential_layer}.''' , F'''layers.{int(snake_case_ )//3}.linear.''' )
elif re.match(snake_case_ , snake_case_ ):
snake_case__ : str = int(re.match(snake_case_ , snake_case_ ).group(1 ) )
# Because in CLAP they use `nn.Sequential`...
snake_case__ : Union[str, Any] = 1 if projecton_layer == 0 else 2
snake_case__ : Tuple = key.replace(F'''_projection.{projecton_layer}.''' , F'''_projection.linear{transformers_projection_layer}.''' )
if "audio" and "qkv" in key:
# split qkv into query key and value
snake_case__ : int = value
snake_case__ : Optional[Any] = mixed_qkv.size(0 ) // 3
snake_case__ : List[Any] = mixed_qkv[:qkv_dim]
snake_case__ : Union[str, Any] = mixed_qkv[qkv_dim : qkv_dim * 2]
snake_case__ : Optional[Any] = mixed_qkv[qkv_dim * 2 :]
snake_case__ : List[str] = query_layer
snake_case__ : Optional[int] = key_layer
snake_case__ : List[Any] = value_layer
else:
snake_case__ : Tuple = value
return model_state_dict
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Optional[Any] , snake_case_ : List[Any] , snake_case_ : Union[str, Any]=False ):
snake_case__, snake_case__ : List[Any] = init_clap(snake_case_ , enable_fusion=snake_case_ )
clap_model.eval()
snake_case__ : List[Any] = clap_model.state_dict()
snake_case__ : List[Any] = rename_state_dict(snake_case_ )
snake_case__ : str = ClapConfig()
snake_case__ : Optional[Any] = enable_fusion
snake_case__ : str = ClapModel(snake_case_ )
# ignore the spectrogram embedding layer
model.load_state_dict(snake_case_ , strict=snake_case_ )
model.save_pretrained(snake_case_ )
transformers_config.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : str = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument("""--enable_fusion""", action="""store_true""", help="""Whether to enable fusion or not""")
__lowerCamelCase : Optional[int] = parser.parse_args()
convert_clap_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.enable_fusion)
| 25 |
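# Hedged aside: a minimal sketch of the fused-qkv split performed above — one
# (3 * d, ...) tensor is cut into equal query / key / value thirds along dim 0:
import torch

d = 8
mixed_qkv = torch.randn(3 * d, d)
qkv_dim = mixed_qkv.size(0) // 3
query = mixed_qkv[:qkv_dim]
key = mixed_qkv[qkv_dim : qkv_dim * 2]
value = mixed_qkv[qkv_dim * 2 :]
assert query.shape == key.shape == value.shape == (d, d)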
# tests directory-specific settings - this file is run automatically
# by pytest before any tests are run
import sys
import warnings
from os.path import abspath, dirname, join
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
__lowerCamelCase : Dict = abspath(join(dirname(dirname(__file__)), """src"""))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action="""ignore""", category=FutureWarning)
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
from diffusers.utils.testing_utils import pytest_addoption_shared
pytest_addoption_shared(snake_case_ )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any ):
from diffusers.utils.testing_utils import pytest_terminal_summary_main
snake_case__ : Optional[int] = terminalreporter.config.getoption("--make-reports" )
if make_reports:
pytest_terminal_summary_main(snake_case_ , id=snake_case_ )
| 25 | 1 |
from __future__ import annotations
import typing
from collections import Counter
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
snake_case__ : typing.Counter[int] = Counter()
for base in range(1 , max_perimeter + 1 ):
for perpendicular in range(snake_case_ , max_perimeter + 1 ):
snake_case__ : str = (base * base + perpendicular * perpendicular) ** 0.5
if hypotenuse == int(snake_case_ ):
snake_case__ : List[str] = int(base + perpendicular + hypotenuse )
if perimeter > max_perimeter:
continue
triplets[perimeter] += 1
return triplets
def SCREAMING_SNAKE_CASE ( snake_case_ : int = 1000 ):
snake_case__ : List[str] = pythagorean_triple(snake_case_ )
return triplets.most_common(1 )[0][0]
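# Sanity check: 120 is the smallest perimeter with three right-triangle
# solutions, namely (20, 48, 52), (24, 45, 51) and (30, 40, 50), so
# pythagorean_triple(120)[120] == 3.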
if __name__ == "__main__":
print(f"Perimeter {solution()} has maximum solutions")
| 25 |
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : Any = [0] * len(snake_case_ )
for i in range(1 , len(snake_case_ ) ):
# use last results for better performance - dynamic programming
snake_case__ : Union[str, Any] = prefix_result[i - 1]
while j > 0 and input_string[i] != input_string[j]:
snake_case__ : str = prefix_result[j - 1]
if input_string[i] == input_string[j]:
j += 1
snake_case__ : int = j
return prefix_result
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
return max(prefix_function(snake_case_ ) )
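# Worked example: prefix_function("aabcdaabc") == [0, 1, 0, 0, 0, 1, 2, 3, 4],
# so the longest proper prefix that is also a suffix ("aabc") has length 4.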
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 | 1 |
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
__lowerCamelCase : List[str] = 16
__lowerCamelCase : Any = 32
def SCREAMING_SNAKE_CASE ( snake_case_ : Accelerator , snake_case_ : int = 16 , snake_case_ : str = "bert-base-cased" ):
snake_case__ : List[Any] = AutoTokenizer.from_pretrained(snake_case_ )
snake_case__ : Union[str, Any] = load_dataset("glue" , "mrpc" )
def tokenize_function(snake_case_ : Optional[int] ):
# max_length=None => use the model max length (it's actually the default)
snake_case__ : Tuple = tokenizer(examples["sentence1"] , examples["sentence2"] , truncation=snake_case_ , max_length=snake_case_ )
return outputs
# Apply the method we just defined to all the examples in all the splits of the dataset
snake_case__ : Tuple = datasets.map(
snake_case_ , batched=snake_case_ , remove_columns=["idx", "sentence1", "sentence2"] , load_from_cache_file=snake_case_ )
    # We also rename the 'label' column to 'labels', which is the name the models in the
    # transformers library expect
snake_case__ : Optional[Any] = tokenized_datasets.rename_column("label" , "labels" )
def collate_fn(snake_case_ : List[Any] ):
# On TPU it's best to pad everything to the same length or training will be very slow.
if accelerator.distributed_type == DistributedType.TPU:
return tokenizer.pad(snake_case_ , padding="max_length" , max_length=128 , return_tensors="pt" )
return tokenizer.pad(snake_case_ , padding="longest" , return_tensors="pt" )
# Instantiate dataloaders.
snake_case__ : Union[str, Any] = DataLoader(
tokenized_datasets["train"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
snake_case__ : List[str] = DataLoader(
tokenized_datasets["validation"] , shuffle=snake_case_ , collate_fn=snake_case_ , batch_size=snake_case_ )
return train_dataloader, eval_dataloader
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : int ):
# Initialize accelerator
snake_case__ : List[str] = Accelerator()
# Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
snake_case__ : Union[str, Any] = config["lr"]
snake_case__ : Optional[int] = int(config["num_epochs"] )
snake_case__ : Dict = int(config["seed"] )
snake_case__ : str = int(config["batch_size"] )
snake_case__ : Dict = args.model_name_or_path
set_seed(snake_case_ )
snake_case__, snake_case__ : Optional[Any] = get_dataloaders(snake_case_ , snake_case_ , snake_case_ )
# Instantiate the model (we build the model here so that the seed also control new weights initialization)
snake_case__ : Tuple = AutoModelForSequenceClassification.from_pretrained(snake_case_ , return_dict=snake_case_ )
# Instantiate optimizer
snake_case__ : Union[str, Any] = (
AdamW
if accelerator.state.deepspeed_plugin is None
or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
else DummyOptim
)
snake_case__ : int = optimizer_cls(params=model.parameters() , lr=snake_case_ )
if accelerator.state.deepspeed_plugin is not None:
snake_case__ : str = accelerator.state.deepspeed_plugin.deepspeed_config[
"gradient_accumulation_steps"
]
else:
snake_case__ : List[Any] = 1
snake_case__ : Tuple = (len(snake_case_ ) * num_epochs) // gradient_accumulation_steps
# Instantiate scheduler
if (
accelerator.state.deepspeed_plugin is None
or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
):
snake_case__ : List[str] = get_linear_schedule_with_warmup(
optimizer=snake_case_ , num_warmup_steps=0 , num_training_steps=snake_case_ , )
else:
snake_case__ : List[Any] = DummyScheduler(snake_case_ , total_num_steps=snake_case_ , warmup_num_steps=0 )
# Prepare everything
# There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
# prepare method.
snake_case__, snake_case__, snake_case__, snake_case__, snake_case__ : Any = accelerator.prepare(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
# We need to keep track of how many total steps we have iterated over
snake_case__ : Tuple = 0
# We also need to keep track of the stating epoch so files are named properly
snake_case__ : str = 0
# Now we train the model
snake_case__ : int = evaluate.load("glue" , "mrpc" )
snake_case__ : List[Any] = 0
snake_case__ : List[str] = {}
for epoch in range(snake_case_ , snake_case_ ):
model.train()
for step, batch in enumerate(snake_case_ ):
snake_case__ : str = model(**snake_case_ )
snake_case__ : Optional[Any] = outputs.loss
snake_case__ : Dict = loss / gradient_accumulation_steps
accelerator.backward(snake_case_ )
if step % gradient_accumulation_steps == 0:
optimizer.step()
lr_scheduler.step()
optimizer.zero_grad()
overall_step += 1
model.eval()
snake_case__ : Dict = 0
for step, batch in enumerate(snake_case_ ):
# We could avoid this line since we set the accelerator with `device_placement=True`.
batch.to(accelerator.device )
with torch.no_grad():
snake_case__ : Optional[int] = model(**snake_case_ )
snake_case__ : Any = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once than to call it multiple times
snake_case__, snake_case__ : Tuple = accelerator.gather(
(predictions, batch["labels"]) ) # If we are in a multiprocess environment, the last batch has duplicates
if accelerator.use_distributed:
if step == len(snake_case_ ) - 1:
snake_case__ : Tuple = predictions[: len(eval_dataloader.dataset ) - samples_seen]
snake_case__ : List[Any] = references[: len(eval_dataloader.dataset ) - samples_seen]
else:
samples_seen += references.shape[0]
metric.add_batch(
predictions=snake_case_ , references=snake_case_ , )
snake_case__ : Optional[int] = metric.compute()
# Use accelerator.print to print only on the main process.
accelerator.print(F'''epoch {epoch}:''' , snake_case_ )
snake_case__ : str = eval_metric["accuracy"]
if best_performance < eval_metric["accuracy"]:
snake_case__ : List[str] = eval_metric["accuracy"]
if args.performance_lower_bound is not None:
assert (
args.performance_lower_bound <= best_performance
), F'''Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'''
accelerator.wait_for_everyone()
if accelerator.is_main_process:
with open(os.path.join(args.output_dir , "all_results.json" ) , "w" ) as f:
json.dump(snake_case_ , snake_case_ )
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage." )
parser.add_argument(
"--model_name_or_path" , type=snake_case_ , default="bert-base-cased" , help="Path to pretrained model or model identifier from huggingface.co/models." , required=snake_case_ , )
parser.add_argument(
"--output_dir" , type=snake_case_ , default="." , help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory." , )
parser.add_argument(
"--performance_lower_bound" , type=snake_case_ , default=snake_case_ , help="Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value." , )
parser.add_argument(
"--num_epochs" , type=snake_case_ , default=3 , help="Number of train epochs." , )
snake_case__ : List[Any] = parser.parse_args()
snake_case__ : str = {"lr": 2E-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}
training_function(snake_case_ , snake_case_ )
if __name__ == "__main__":
main()
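# Sanity check for the scheduler sizing above: with a 1000-batch dataloader,
# 3 epochs and gradient_accumulation_steps == 4, max_training_steps is
# (1000 * 3) // 4 == 750, i.e. one scheduler step per optimizer step.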
| 25 |
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
__lowerCamelCase : Optional[int] = get_logger()
__lowerCamelCase : Optional[dict] = None
class SCREAMING_SNAKE_CASE__ ( TensorFormatter[Mapping, "jax.Array", Mapping] ):
"""simple docstring"""
def __init__( self : Optional[Any] , __A : Dict=None , __A : List[str]=None , **__A : str ):
super().__init__(features=__A )
import jax
from jaxlib.xla_client import Device
if isinstance(__A , __A ):
raise ValueError(
f'''Expected {device} to be a `str` not {type(__A )}, as `jaxlib.xla_extension.Device` '''
"is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
"the device with `str()` to get its string identifier that will be internally mapped "
"to the actual `jaxlib.xla_extension.Device`." )
snake_case__ : List[Any] = device if isinstance(__A , __A ) else str(jax.devices()[0] )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or
        # `dill`, so the device mapping is kept in a module-level global instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : Any = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f'''Device with string identifier {self.device} not listed among the available '''
f'''devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default '''
f'''device: {str(jax.devices()[0] )}.''' )
snake_case__ : str = str(jax.devices()[0] )
snake_case__ : str = jnp_array_kwargs
@staticmethod
def _lowercase ( ):
import jax
return {str(__A ): device for device in jax.devices()}
def _lowercase ( self : Optional[Any] , __A : str ):
import jax
import jax.numpy as jnp
if isinstance(__A , __A ) and column:
if all(
isinstance(__A , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__A , axis=0 )
return column
def _lowercase ( self : int , __A : Tuple ):
import jax
import jax.numpy as jnp
if isinstance(__A , (str, bytes, type(__A )) ):
return value
elif isinstance(__A , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
snake_case__ : Optional[int] = {}
if isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
snake_case__ : Any = {"dtype": jnp.intaa}
else:
snake_case__ : Tuple = {"dtype": jnp.intaa}
elif isinstance(__A , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
snake_case__ : str = {"dtype": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__A , PIL.Image.Image ):
snake_case__ : Optional[Any] = np.asarray(__A )
        # `jaxlib.xla_extension.Device` is not serializable with either `pickle` or
        # `dill`, so the device mapping is kept in a module-level global instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
snake_case__ : int = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__A , **{**default_dtype, **self.jnp_array_kwargs} )
def _lowercase ( self : Union[str, Any] , __A : Optional[int] ):
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__A , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__A , "__array__" ) and not isinstance(__A , jax.Array ):
snake_case__ : Union[str, Any] = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__A , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
elif isinstance(__A , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__A ) for substruct in data_struct] )
return self._tensorize(__A )
def _lowercase ( self : Tuple , __A : dict ):
return map_nested(self._recursive_tensorize , __A , map_list=__A )
def _lowercase ( self : Optional[int] , __A : pa.Table ):
snake_case__ : int = self.numpy_arrow_extractor().extract_row(__A )
snake_case__ : Tuple = self.python_features_decoder.decode_row(__A )
return self.recursive_tensorize(__A )
def _lowercase ( self : Optional[Any] , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_column(__A )
snake_case__ : Optional[int] = self.python_features_decoder.decode_column(__A , pa_table.column_names[0] )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
snake_case__ : Dict = self._consolidate(__A )
return column
def _lowercase ( self : str , __A : pa.Table ):
snake_case__ : Any = self.numpy_arrow_extractor().extract_batch(__A )
snake_case__ : int = self.python_features_decoder.decode_batch(__A )
snake_case__ : List[Any] = self.recursive_tensorize(__A )
for column_name in batch:
snake_case__ : Any = self._consolidate(batch[column_name] )
return batch
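# Hedged usage sketch: this formatter backs `Dataset.with_format("jax")` in the
# `datasets` library; the toy data below is made up for illustration.
#   from datasets import Dataset
#   ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
#   ds[0]["x"]  # a jax.Array placed on the formatter's device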
| 25 | 1 |
from __future__ import annotations
from collections.abc import Callable
__lowerCamelCase : Union[str, Any] = list[list[float | int]]
def SCREAMING_SNAKE_CASE ( snake_case_ : Matrix , snake_case_ : Matrix ):
snake_case__ : int = len(snake_case_ )
snake_case__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(snake_case_ )]
snake_case__ : int
snake_case__ : int
snake_case__ : int
snake_case__ : int
snake_case__ : int
snake_case__ : float
for row in range(snake_case_ ):
for col in range(snake_case_ ):
snake_case__ : Union[str, Any] = matrix[row][col]
snake_case__ : Any = vector[row][0]
snake_case__ : Any = 0
snake_case__ : Optional[Any] = 0
while row < size and col < size:
# pivoting
snake_case__ : Union[str, Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(snake_case_ , snake_case_ ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
snake_case__, snake_case__ : List[Any] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , snake_case_ ):
snake_case__ : Dict = augmented[rowa][col] / augmented[row][col]
snake_case__ : Optional[Any] = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , snake_case_ ):
for row in range(snake_case_ ):
snake_case__ : Any = augmented[row][col] / augmented[col][col]
for cola in range(snake_case_ , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(snake_case_ )
]
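# Worked example: solve([[1, 2], [3, 4]], [[5], [6]]) returns [[-4.0], [4.5]],
# since x + 2y = 5 and 3x + 4y = 6 give x = -4 and y = 4.5.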
def SCREAMING_SNAKE_CASE ( snake_case_ : list[int] ):
snake_case__ : int = len(snake_case_ )
snake_case__ : Matrix = [[0 for _ in range(snake_case_ )] for _ in range(snake_case_ )]
snake_case__ : Matrix = [[0] for _ in range(snake_case_ )]
snake_case__ : Matrix
snake_case__ : int
snake_case__ : int
snake_case__ : int
for x_val, y_val in enumerate(snake_case_ ):
for col in range(snake_case_ ):
snake_case__ : Tuple = (x_val + 1) ** (size - col - 1)
snake_case__ : Tuple = y_val
snake_case__ : Any = solve(snake_case_ , snake_case_ )
def interpolated_func(snake_case_ : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(snake_case_ ) )
return interpolated_func
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def SCREAMING_SNAKE_CASE ( snake_case_ : Callable[[int], int] = question_function , snake_case_ : int = 10 ):
snake_case__ : list[int] = [func(snake_case_ ) for x_val in range(1 , order + 1 )]
snake_case__ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
snake_case__ : int = 0
snake_case__ : Callable[[int], int]
snake_case__ : int
for poly in polynomials:
snake_case__ : Tuple = 1
while func(snake_case_ ) == poly(snake_case_ ):
x_val += 1
ret += poly(snake_case_ )
return ret
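# From the Project Euler 101 statement, for u(n) = n**3: the first-order OP is
# the constant 1 (FIT = 1), the second-order OP is 7n - 6 (FIT = 15), and the
# third-order OP is 6n**2 - 11n + 6 (FIT = 58), for a FIT sum of 74.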
if __name__ == "__main__":
print(f"{solution() = }")
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
__lowerCamelCase : Tuple = {
"""configuration_roberta_prelayernorm""": [
"""ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP""",
"""RobertaPreLayerNormConfig""",
"""RobertaPreLayerNormOnnxConfig""",
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Tuple = [
"""ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""RobertaPreLayerNormForCausalLM""",
"""RobertaPreLayerNormForMaskedLM""",
"""RobertaPreLayerNormForMultipleChoice""",
"""RobertaPreLayerNormForQuestionAnswering""",
"""RobertaPreLayerNormForSequenceClassification""",
"""RobertaPreLayerNormForTokenClassification""",
"""RobertaPreLayerNormModel""",
"""RobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Union[str, Any] = [
"""TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFRobertaPreLayerNormForCausalLM""",
"""TFRobertaPreLayerNormForMaskedLM""",
"""TFRobertaPreLayerNormForMultipleChoice""",
"""TFRobertaPreLayerNormForQuestionAnswering""",
"""TFRobertaPreLayerNormForSequenceClassification""",
"""TFRobertaPreLayerNormForTokenClassification""",
"""TFRobertaPreLayerNormMainLayer""",
"""TFRobertaPreLayerNormModel""",
"""TFRobertaPreLayerNormPreTrainedModel""",
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : List[Any] = [
"""FlaxRobertaPreLayerNormForCausalLM""",
"""FlaxRobertaPreLayerNormForMaskedLM""",
"""FlaxRobertaPreLayerNormForMultipleChoice""",
"""FlaxRobertaPreLayerNormForQuestionAnswering""",
"""FlaxRobertaPreLayerNormForSequenceClassification""",
"""FlaxRobertaPreLayerNormForTokenClassification""",
"""FlaxRobertaPreLayerNormModel""",
"""FlaxRobertaPreLayerNormPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP,
RobertaPreLayerNormConfig,
RobertaPreLayerNormOnnxConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_roberta_prelayernorm import (
ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
RobertaPreLayerNormForCausalLM,
RobertaPreLayerNormForMaskedLM,
RobertaPreLayerNormForMultipleChoice,
RobertaPreLayerNormForQuestionAnswering,
RobertaPreLayerNormForSequenceClassification,
RobertaPreLayerNormForTokenClassification,
RobertaPreLayerNormModel,
RobertaPreLayerNormPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_roberta_prelayernorm import (
TF_ROBERTA_PRELAYERNORM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFRobertaPreLayerNormForCausalLM,
TFRobertaPreLayerNormForMaskedLM,
TFRobertaPreLayerNormForMultipleChoice,
TFRobertaPreLayerNormForQuestionAnswering,
TFRobertaPreLayerNormForSequenceClassification,
TFRobertaPreLayerNormForTokenClassification,
TFRobertaPreLayerNormMainLayer,
TFRobertaPreLayerNormModel,
TFRobertaPreLayerNormPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_roberta_prelayernorm import (
FlaxRobertaPreLayerNormForCausalLM,
FlaxRobertaPreLayerNormForMaskedLM,
FlaxRobertaPreLayerNormForMultipleChoice,
FlaxRobertaPreLayerNormForQuestionAnswering,
FlaxRobertaPreLayerNormForSequenceClassification,
FlaxRobertaPreLayerNormForTokenClassification,
FlaxRobertaPreLayerNormModel,
FlaxRobertaPreLayerNormPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Dict = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
__lowerCamelCase : Union[str, Any] = {
"""configuration_falcon""": ["""FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP""", """FalconConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""FALCON_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""FalconForCausalLM""",
"""FalconModel""",
"""FalconPreTrainedModel""",
"""FalconForSequenceClassification""",
"""FalconForTokenClassification""",
"""FalconForQuestionAnswering""",
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
__lowerCamelCase : str = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
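# Lazy-import note: with `_LazyModule` installed as the module object,
# `from transformers import FalconForCausalLM` only imports `modeling_falcon`
# (and therefore torch) on first attribute access, keeping bare imports cheap.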
| 25 |
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.activations import gelu_new, gelu_python, get_activation
@require_torch
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
def _lowercase ( self : Tuple ):
snake_case__ : List[str] = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Tuple = get_activation("gelu" )
self.assertTrue(torch.allclose(gelu_python(__A ) , torch_builtin(__A ) ) )
self.assertFalse(torch.allclose(gelu_python(__A ) , gelu_new(__A ) ) )
def _lowercase ( self : Dict ):
snake_case__ : str = torch.tensor([-1_0_0, -1, -0.1, 0, 0.1, 1.0, 1_0_0] )
snake_case__ : Union[str, Any] = get_activation("gelu" )
snake_case__ : int = get_activation("gelu_10" )
snake_case__ : Optional[int] = torch_builtin(__A )
snake_case__ : Dict = geluaa(__A )
snake_case__ : Optional[Any] = torch.where(y_gelu_aa < 1_0.0 , 1 , 0 )
self.assertTrue(torch.max(__A ).item() == 1_0.0 )
self.assertTrue(torch.allclose(y_gelu * clipped_mask , y_gelu_aa * clipped_mask ) )
def _lowercase ( self : str ):
get_activation("gelu" )
get_activation("gelu_10" )
get_activation("gelu_fast" )
get_activation("gelu_new" )
get_activation("gelu_python" )
get_activation("gelu_pytorch_tanh" )
get_activation("linear" )
get_activation("mish" )
get_activation("quick_gelu" )
get_activation("relu" )
get_activation("sigmoid" )
get_activation("silu" )
get_activation("swish" )
get_activation("tanh" )
with self.assertRaises(__A ):
get_activation("bogus" )
with self.assertRaises(__A ):
get_activation(__A )
def _lowercase ( self : List[str] ):
snake_case__ : List[str] = get_activation("gelu" )
snake_case__ : Any = 1
snake_case__ : Union[str, Any] = get_activation("gelu" )
self.assertEqual(acta.a , 1 )
with self.assertRaises(__A ):
snake_case__ : int = acta.a
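# Minimal usage sketch of the API exercised above:
#   act = get_activation("gelu")
#   act(torch.tensor([0.0, 1.0]))  # tensor([0.0000, 0.8413])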
| 25 | 1 |
# NOTE: This file is deprecated and will be removed in a future version.
# It only exists so that temporarily `from diffusers.pipelines import DiffusionPipeline` works
from ...utils import deprecate
from ..controlnet.pipeline_flax_controlnet import FlaxStableDiffusionControlNetPipeline # noqa: F401
deprecate(
"""stable diffusion controlnet""",
"""0.22.0""",
"""Importing `FlaxStableDiffusionControlNetPipeline` from diffusers.pipelines.stable_diffusion.flax_pipeline_stable_diffusion_controlnet is deprecated. Please import `from diffusers import FlaxStableDiffusionControlNetPipeline` instead.""",
standard_warn=False,
stacklevel=3,
)
| 25 |
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
__lowerCamelCase : int = logging.get_logger(__name__)
__lowerCamelCase : int = {
"""post_extract_proj""": """feature_projection.projection""",
"""encoder.pos_conv.0""": """encoder.pos_conv_embed.conv""",
"""self_attn.k_proj""": """encoder.layers.*.attention.k_proj""",
"""self_attn.v_proj""": """encoder.layers.*.attention.v_proj""",
"""self_attn.q_proj""": """encoder.layers.*.attention.q_proj""",
"""self_attn.out_proj""": """encoder.layers.*.attention.out_proj""",
"""self_attn_layer_norm""": """encoder.layers.*.layer_norm""",
"""fc1""": """encoder.layers.*.feed_forward.intermediate_dense""",
"""fc2""": """encoder.layers.*.feed_forward.output_dense""",
"""final_layer_norm""": """encoder.layers.*.final_layer_norm""",
"""encoder.layer_norm""": """encoder.layer_norm""",
"""encoder.layer_norm_for_extract""": """layer_norm_for_extract""",
"""w2v_model.layer_norm""": """feature_projection.layer_norm""",
"""quantizer.weight_proj""": """quantizer.weight_proj""",
"""quantizer.vars""": """quantizer.codevectors""",
"""project_q""": """project_q""",
"""final_proj""": """project_hid""",
"""w2v_encoder.proj""": """lm_head""",
"""label_embs_concat""": """label_embeddings_concat""",
"""mask_emb""": """masked_spec_embed""",
"""spk_proj""": """speaker_proj""",
}
__lowerCamelCase : Tuple = [
"""lm_head""",
"""quantizer.weight_proj""",
"""quantizer.codevectors""",
"""project_q""",
"""project_hid""",
"""label_embeddings_concat""",
"""speaker_proj""",
"""layer_norm_for_extract""",
]
def SCREAMING_SNAKE_CASE ( snake_case_ : Tuple , snake_case_ : Union[str, Any] , snake_case_ : Union[str, Any] , snake_case_ : Any , snake_case_ : Union[str, Any] ):
for attribute in key.split("." ):
snake_case__ : int = getattr(snake_case_ , snake_case_ )
if weight_type is not None:
snake_case__ : Optional[Any] = getattr(snake_case_ , snake_case_ ).shape
else:
snake_case__ : List[str] = hf_pointer.shape
if hf_shape != value.shape:
raise ValueError(
F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
F''' {value.shape} for {full_name}''' )
if weight_type == "weight":
snake_case__ : str = value
elif weight_type == "weight_g":
snake_case__ : Union[str, Any] = value
elif weight_type == "weight_v":
snake_case__ : Optional[Any] = value
elif weight_type == "bias":
snake_case__ : str = value
else:
snake_case__ : Union[str, Any] = value
logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : Union[str, Any] ):
snake_case__ : str = []
snake_case__ : Optional[int] = fairseq_model.state_dict()
snake_case__ : int = hf_model.unispeech_sat.feature_extractor
for name, value in fairseq_dict.items():
snake_case__ : Dict = False
if "conv_layers" in name:
load_conv_layer(
snake_case_ , snake_case_ , snake_case_ , snake_case_ , hf_model.config.feat_extract_norm == "group" , )
snake_case__ : str = True
else:
for key, mapped_key in MAPPING.items():
snake_case__ : Optional[int] = "unispeech_sat." + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
if key in name or key.split("w2v_model." )[-1] == name.split("." )[0]:
if "layer_norm_for_extract" in name and (".".join(name.split("." )[:-1] ) != key):
# special case since naming is very similar
continue
snake_case__ : int = True
if "*" in mapped_key:
snake_case__ : Any = name.split(snake_case_ )[0].split("." )[-2]
snake_case__ : Any = mapped_key.replace("*" , snake_case_ )
if "weight_g" in name:
snake_case__ : List[Any] = "weight_g"
elif "weight_v" in name:
snake_case__ : Optional[Any] = "weight_v"
elif "bias" in name:
snake_case__ : Optional[Any] = "bias"
elif "weight" in name:
# TODO: don't match quantizer.weight_proj
snake_case__ : Optional[Any] = "weight"
else:
snake_case__ : Optional[Any] = None
set_recursively(snake_case_ , snake_case_ , snake_case_ , snake_case_ , snake_case_ )
continue
if not is_used:
unused_weights.append(snake_case_ )
logger.warning(F'''Unused weights: {unused_weights}''' )
def SCREAMING_SNAKE_CASE ( snake_case_ : Any , snake_case_ : List[str] , snake_case_ : List[Any] , snake_case_ : Optional[Any] , snake_case_ : str ):
snake_case__ : Tuple = full_name.split("conv_layers." )[-1]
snake_case__ : Union[str, Any] = name.split("." )
snake_case__ : str = int(items[0] )
snake_case__ : str = int(items[1] )
if type_id == 0:
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
snake_case__ : Any = value
logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
snake_case__ : Optional[Any] = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
elif "weight" in name:
if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
raise ValueError(
F'''{full_name} has size {value.shape}, but'''
F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
snake_case__ : int = value
logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
else:
unused_weights.append(snake_case_ )
@torch.no_grad()
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : Any , snake_case_ : Optional[int]=None , snake_case_ : Optional[int]=None , snake_case_ : Any=True ):
if config_path is not None:
snake_case__ : Tuple = UniSpeechSatConfig.from_pretrained(snake_case_ )
else:
snake_case__ : Tuple = UniSpeechSatConfig()
snake_case__ : str = ""
if is_finetuned:
snake_case__ : Tuple = UniSpeechSatForCTC(snake_case_ )
else:
snake_case__ : Any = UniSpeechSatForPreTraining(snake_case_ )
snake_case__, snake_case__, snake_case__ : Any = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={"data": "/".join(dict_path.split("/" )[:-1] )} )
snake_case__ : Tuple = model[0].eval()
recursively_load_weights(snake_case_ , snake_case_ )
hf_wavavec.save_pretrained(snake_case_ )
if __name__ == "__main__":
__lowerCamelCase : int = argparse.ArgumentParser()
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument("""--checkpoint_path""", default=None, type=str, help="""Path to fairseq checkpoint""")
parser.add_argument("""--dict_path""", default=None, type=str, help="""Path to dict of fine-tuned model""")
parser.add_argument("""--config_path""", default=None, type=str, help="""Path to hf config.json of model to convert""")
parser.add_argument(
"""--not_finetuned""", action="""store_true""", help="""Whether the model to convert is a fine-tuned model or not"""
)
__lowerCamelCase : List[Any] = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
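# Hedged CLI sketch (the script name and every path below are placeholders):
#   python convert_unispeech_sat_checkpoint.py \
#     --checkpoint_path /path/to/checkpoint.pt \
#     --dict_path /path/to/dict \
#     --pytorch_dump_folder_path ./unispeech-sat-hf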
| 25 | 1 |
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int ):
snake_case__ : list[list[str]] = [[] for _ in range(snake_case_ )]
snake_case__ : Optional[int] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1 or len(snake_case_ ) <= key:
return input_string
for position, character in enumerate(snake_case_ ):
snake_case__ : Union[str, Any] = position % (lowest * 2) # puts it in bounds
snake_case__ : Dict = min(snake_case_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append(snake_case_ )
snake_case__ : Tuple = ["".join(snake_case_ ) for row in temp_grid]
snake_case__ : List[str] = "".join(snake_case_ )
return output_string
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : int ):
snake_case__ : Any = []
snake_case__ : Optional[Any] = key - 1
if key <= 0:
raise ValueError("Height of grid can't be 0 or negative" )
if key == 1:
return input_string
snake_case__ : list[list[str]] = [[] for _ in range(snake_case_ )] # generates template
for position in range(len(snake_case_ ) ):
snake_case__ : Optional[Any] = position % (lowest * 2) # puts it in bounds
snake_case__ : str = min(snake_case_ , lowest * 2 - num ) # creates zigzag pattern
temp_grid[num].append("*" )
snake_case__ : int = 0
for row in temp_grid: # fills in the characters
snake_case__ : Dict = input_string[counter : counter + len(snake_case_ )]
grid.append(list(snake_case_ ) )
counter += len(snake_case_ )
snake_case__ : Dict = "" # reads as zigzag
for position in range(len(snake_case_ ) ):
snake_case__ : List[Any] = position % (lowest * 2) # puts it in bounds
snake_case__ : Tuple = min(snake_case_ , lowest * 2 - num ) # creates zigzag pattern
output_string += grid[num][0]
grid[num].pop(0 )
return output_string
def SCREAMING_SNAKE_CASE ( snake_case_ : str ):
snake_case__ : Optional[int] = {}
for key_guess in range(1 , len(snake_case_ ) ): # tries every key
snake_case__ : Tuple = decrypt(snake_case_ , snake_case_ )
return results
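# Round-trip example: encrypt("abcdef", 2) == "acebdf" (rails "ace" and "bdf"),
# decrypt("acebdf", 2) == "abcdef", and bruteforce("acebdf")[2] == "abcdef".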
if __name__ == "__main__":
import doctest
doctest.testmod()
| 25 |
import copy
import tempfile
import unittest
from transformers import MaMaaaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from transformers.utils import cached_property
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaMaaaForConditionalGeneration, MaMaaaModel, MaMaaaTokenizer
from transformers.models.mam_aaa.modeling_mam_aaa import MaMaaaDecoder, MaMaaaEncoder
def SCREAMING_SNAKE_CASE ( snake_case_ : str , snake_case_ : Dict , snake_case_ : List[Any] , snake_case_ : Dict=None , snake_case_ : Tuple=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , snake_case_ : List[str]=None , ):
if attention_mask is None:
snake_case__ : Any = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
snake_case__ : List[Any] = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
snake_case__ : str = torch.ones(config.encoder_layers , config.encoder_attention_heads , device=snake_case_ )
if decoder_head_mask is None:
snake_case__ : Optional[int] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
if cross_attn_head_mask is None:
snake_case__ : Union[str, Any] = torch.ones(config.decoder_layers , config.decoder_attention_heads , device=snake_case_ )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : List[str] , __A : Any , __A : List[str]=1_3 , __A : List[Any]=7 , __A : Union[str, Any]=True , __A : Union[str, Any]=False , __A : str=9_9 , __A : Optional[Any]=1_6 , __A : Optional[Any]=2 , __A : Any=4 , __A : List[Any]=4 , __A : int="relu" , __A : Optional[int]=0.1 , __A : Tuple=0.1 , __A : Optional[int]=0.0 , __A : Optional[Any]=0.0 , __A : List[Any]=2_0 , __A : Optional[Any]=2 , __A : int=1 , __A : Union[str, Any]=0 , ):
snake_case__ : Optional[Any] = parent
snake_case__ : List[str] = batch_size
snake_case__ : Union[str, Any] = seq_length
snake_case__ : Optional[Any] = is_training
snake_case__ : List[str] = use_labels
snake_case__ : Tuple = vocab_size
snake_case__ : Optional[Any] = hidden_size
snake_case__ : Union[str, Any] = num_hidden_layers
snake_case__ : List[Any] = num_attention_heads
snake_case__ : Tuple = intermediate_size
snake_case__ : str = hidden_act
snake_case__ : Optional[Any] = hidden_dropout_prob
snake_case__ : int = attention_probs_dropout_prob
snake_case__ : int = encoder_layerdrop
snake_case__ : Tuple = decoder_layerdrop
snake_case__ : List[str] = max_position_embeddings
snake_case__ : Tuple = eos_token_id
snake_case__ : Dict = pad_token_id
snake_case__ : str = bos_token_id
def _lowercase ( self : Tuple ):
snake_case__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
snake_case__ : Union[str, Any] = self.eos_token_id # Eos Token
snake_case__ : str = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for M2M100 the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
snake_case__ : int = input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Optional[Any] = decoder_input_ids.clamp(self.pad_token_id + 1 )
snake_case__ : Union[str, Any] = self.get_config()
snake_case__ : Union[str, Any] = prepare_mam_aaa_inputs_dict(__A , __A , __A )
return config, inputs_dict
def _lowercase ( self : Dict ):
return MaMaaaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , encoder_layerdrop=self.encoder_layerdrop , decoder_layerdrop=self.decoder_layerdrop , max_position_embeddings=self.max_position_embeddings , eos_token_id=self.eos_token_id , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , )
def _lowercase ( self : List[str] ):
snake_case__, snake_case__ : Any = self.prepare_config_and_inputs()
return config, inputs_dict
def _lowercase ( self : Optional[Any] , __A : int , __A : Dict ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).get_decoder().to(__A ).eval()
snake_case__ : List[Any] = inputs_dict["input_ids"]
snake_case__ : Optional[Any] = inputs_dict["attention_mask"]
snake_case__ : Union[str, Any] = inputs_dict["head_mask"]
# first forward pass
snake_case__ : Dict = model(__A , attention_mask=__A , head_mask=__A , use_cache=__A )
snake_case__, snake_case__ : Dict = outputs.to_tuple()
# create hypothetical multiple next token and extent to next_input_ids
snake_case__ : int = ids_tensor((self.batch_size, 3) , config.vocab_size )
snake_case__ : List[str] = ids_tensor((self.batch_size, 3) , 2 )
# append to next input_ids and
snake_case__ : Union[str, Any] = torch.cat([input_ids, next_tokens] , dim=-1 )
snake_case__ : List[Any] = torch.cat([attention_mask, next_attn_mask] , dim=-1 )
snake_case__ : Tuple = model(__A , attention_mask=__A )["last_hidden_state"]
snake_case__ : Tuple = model(__A , attention_mask=__A , past_key_values=__A )[
"last_hidden_state"
]
# select random slice
snake_case__ : Optional[Any] = ids_tensor((1,) , output_from_past.shape[-1] ).item()
snake_case__ : Optional[Any] = output_from_no_past[:, -3:, random_slice_idx].detach()
snake_case__ : Any = output_from_past[:, :, random_slice_idx].detach()
self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1] )
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__A , __A , atol=1e-2 ) )
def _lowercase ( self : str , __A : Dict , __A : Optional[Any] ):
snake_case__ : Union[str, Any] = MaMaaaModel(config=__A ).to(__A ).eval()
snake_case__ : Union[str, Any] = model(**__A )
snake_case__ : Tuple = outputs.encoder_last_hidden_state
snake_case__ : Union[str, Any] = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_encoder()
encoder.save_pretrained(__A )
snake_case__ : Any = MaMaaaEncoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = encoder(inputs_dict["input_ids"] , attention_mask=inputs_dict["attention_mask"] )[
0
]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
with tempfile.TemporaryDirectory() as tmpdirname:
snake_case__ : Dict = model.get_decoder()
decoder.save_pretrained(__A )
snake_case__ : Optional[Any] = MaMaaaDecoder.from_pretrained(__A ).to(__A )
snake_case__ : List[str] = decoder(
input_ids=inputs_dict["decoder_input_ids"] , attention_mask=inputs_dict["decoder_attention_mask"] , encoder_hidden_states=__A , encoder_attention_mask=inputs_dict["attention_mask"] , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class SCREAMING_SNAKE_CASE__ ( UpperCamelCase_ , UpperCamelCase_ , UpperCamelCase_ , unittest.TestCase ):
"""simple docstring"""
a_ = (
(
MaMaaaModel,
MaMaaaForConditionalGeneration,
)
if is_torch_available()
else ()
)
a_ = (MaMaaaForConditionalGeneration,) if is_torch_available() else ()
a_ = (
{
"conversational": MaMaaaForConditionalGeneration,
"feature-extraction": MaMaaaModel,
"summarization": MaMaaaForConditionalGeneration,
"text2text-generation": MaMaaaForConditionalGeneration,
"translation": MaMaaaForConditionalGeneration,
}
if is_torch_available()
else {}
)
a_ = True
a_ = True
a_ = False
a_ = False
def _lowercase ( self : int , __A : Tuple , __A : Any , __A : Optional[Any] , __A : Optional[Any] , __A : Union[str, Any] ):
if pipeline_test_casse_name == "TranslationPipelineTests":
# Get `ValueError: Translation requires a `src_lang` and a `tgt_lang` for this model`.
# `M2M100Config` was never used in pipeline tests: cannot create a simple tokenizer.
return True
return False
def _lowercase ( self : Tuple ):
snake_case__ : Any = MaMaaaModelTester(self )
snake_case__ : Dict = ConfigTester(self , config_class=__A )
def _lowercase ( self : Optional[Any] ):
self.config_tester.run_common_tests()
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : int = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
snake_case__ : int = model_class(__A )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__A )
snake_case__, snake_case__ : Optional[int] = model_class.from_pretrained(__A , output_loading_info=__A )
self.assertEqual(info["missing_keys"] , [] )
def _lowercase ( self : Dict ):
snake_case__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_decoder_model_past_large_inputs(*__A )
def _lowercase ( self : Any ):
snake_case__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__A )
def _lowercase ( self : Union[str, Any] ):
snake_case__, snake_case__ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in (MaMaaaModel, MaMaaaForConditionalGeneration):
snake_case__ : str = model_class(__A )
model.to(__A )
model.eval()
snake_case__ : str = copy.deepcopy(self._prepare_for_class(__A , __A ) )
if not self.is_encoder_decoder:
snake_case__ : Optional[Any] = inputs["input_ids"]
del inputs["input_ids"]
else:
snake_case__ : Union[str, Any] = inputs["input_ids"]
snake_case__ : List[str] = inputs.get("decoder_input_ids" , __A )
del inputs["input_ids"]
inputs.pop("decoder_input_ids" , __A )
snake_case__ : Tuple = model.get_input_embeddings()
if not self.is_encoder_decoder:
snake_case__ : List[Any] = wte(__A )
else:
snake_case__ : Any = wte(__A )
snake_case__ : Optional[int] = wte(__A )
with torch.no_grad():
model(**__A )[0]
def _lowercase ( self : Optional[Any] ):
snake_case__, snake_case__ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
snake_case__ : Any = input_dict["input_ids"]
snake_case__ : int = input_ids.ne(1 ).to(__A )
snake_case__ : List[Any] = MaMaaaForConditionalGeneration(__A ).eval().to(__A )
if torch_device == "cuda":
model.half()
model.generate(__A , attention_mask=__A )
model.generate(num_beams=4 , do_sample=__A , early_stopping=__A , num_return_sequences=3 )
def SCREAMING_SNAKE_CASE ( snake_case_ : int ):
return torch.tensor(snake_case_ , dtype=torch.long , device=snake_case_ )
__lowerCamelCase : Optional[Any] = 1e-4
@require_torch
@require_sentencepiece
@require_tokenizers
@slow
class SCREAMING_SNAKE_CASE__ ( unittest.TestCase ):
"""simple docstring"""
@cached_property
def _lowercase ( self : str ):
return MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" )
def _lowercase ( self : Optional[int] ):
snake_case__ : List[str] = MaMaaaModel.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : Optional[Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : str = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : str = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, 1_0_2_4) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : Optional[Any] = torch.tensor(
[[-0.7_7_8_0, -0.1_6_7_6, 0.1_0_3_8], [-6.7_5_5_6, -1.3_9_9_2, 0.0_5_6_7], [-7.5_3_8_3, -0.5_9_2_0, -0.2_7_7_9]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Union[str, Any] ):
snake_case__ : Union[str, Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
# change to intended input
snake_case__ : Union[str, Any] = _long_tensor([[1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8, 2]] )
snake_case__ : List[str] = _long_tensor([[2, 1_2_8_0_2_8, 9_8, 1_2, 3_0_5_2_7, 2_7_3_2, 1_5_9, 7_7_5_5, 6_1_9_0_4, 3_9_1_4_4, 3_8]] )
snake_case__ : int = prepare_mam_aaa_inputs_dict(model.config , __A , __A )
with torch.no_grad():
snake_case__ : Union[str, Any] = model(**__A )[0]
snake_case__ : Tuple = torch.Size((1, 1_1, model.config.vocab_size) )
self.assertEqual(output.shape , __A )
# change to expected output here
snake_case__ : List[str] = torch.tensor(
[[-1.0_4_4_8, -1.0_4_1_1, 3.7_9_9_2], [-3.2_1_9_1, -3.2_3_8_6, -1.3_4_5_1], [-3.6_2_1_0, -3.5_9_9_3, 0.4_9_2_5]] , device=__A )
self.assertTrue(torch.allclose(output[:, :3, :3] , __A , atol=__A ) )
def _lowercase ( self : Optional[Any] ):
snake_case__ : List[Any] = MaMaaaForConditionalGeneration.from_pretrained("facebook/m2m100_418M" ).to(__A )
snake_case__ : List[str] = MaMaaaTokenizer.from_pretrained("facebook/m2m100_418M" , src_lang="fr" , tgt_lang="en" )
snake_case__ : List[Any] = [
"L'affaire NSA souligne l'absence totale de débat sur le renseignement",
"Selon moi, il y a deux niveaux de réponse de la part du gouvernement français.",
"Lorsque François Hollande téléphone à Barack Obama ou quand le ministre des affaires étrangères Laurent"
" Fabius convoque l'ambassadeur des Etats-Unis, ils réagissent à une vraie découverte, qui est celle de"
" l'ampleur de la surveillance américaine sur l'ensemble des communications en France.",
]
# The below article tests that we don't add any hypotheses outside of the top n_beams
snake_case__ : str = tokenizer(__A , padding=__A , return_tensors="pt" )
snake_case__ : Tuple = model.generate(
input_ids=dct["input_ids"].to(__A ) , attention_mask=dct["attention_mask"].to(__A ) , num_beams=5 , forced_bos_token_id=tokenizer.get_lang_id("en" ) , )
snake_case__ : List[str] = [
"The NSA case highlights the total absence of intelligence debate",
"I think there are two levels of response from the French government.",
"When François Hollande calls Barack Obama or when Foreign Minister Laurent Fabius calls the U.S."
" Ambassador, they respond to a real discovery, which is that of the scale of U.S. surveillance on all"
" communications in France.",
]
snake_case__ : Dict = tokenizer.batch_decode(
hypotheses_batch.tolist() , clean_up_tokenization_spaces=__A , skip_special_tokens=__A )
assert generated == expected_en
| 25 | 1 |
from __future__ import annotations
from cmath import sqrt
def SCREAMING_SNAKE_CASE ( snake_case_ : int , snake_case_ : int , snake_case_ : int ):
if a == 0:
raise ValueError("Coefficient 'a' must not be zero." )
snake_case__ : Optional[Any] = b * b - 4 * a * c
snake_case__ : str = (-b + sqrt(snake_case_ )) / (2 * a)
snake_case__ : List[str] = (-b - sqrt(snake_case_ )) / (2 * a)
return (
root_a.real if not root_a.imag else root_a,
root_a.real if not root_a.imag else root_a,
)
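# Examples: quadratic_roots(1, -2, 1) returns (1.0, 1.0) (a repeated real root)
# and quadratic_roots(1, 0, 1) returns (1j, -1j) (a complex-conjugate pair).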
def SCREAMING_SNAKE_CASE ( ):
snake_case__, snake_case__ : int = quadratic_roots(a=5 , b=6 , c=1 )
print(F'''The solutions are: {solutiona} and {solutiona}''' )
if __name__ == "__main__":
main()
| 25 |
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def SCREAMING_SNAKE_CASE ( snake_case_ : List[Any] , snake_case_ : Union[str, Any] ):
snake_case__ : Optional[int] = []
for part_id in partition_order:
snake_case__ : List[Any] = df.where(F'''SPARK_PARTITION_ID() = {part_id}''' ).collect()
for row_idx, row in enumerate(snake_case_ ):
expected_row_ids_and_row_dicts.append((F'''{part_id}_{row_idx}''', row.asDict()) )
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Tuple = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Union[str, Any] = spark.range(100 ).repartition(1 )
snake_case__ : Any = Spark(snake_case_ )
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16 )
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[Any] = spark.range(10 ).repartition(2 )
snake_case__ : Optional[Any] = [1, 0]
snake_case__ : Dict = _generate_iterable_examples(snake_case_ , snake_case_ ) # Reverse the partitions.
snake_case__ : Tuple = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , snake_case_ )
for i, (row_id, row_dict) in enumerate(generate_fn() ):
snake_case__, snake_case__ : Tuple = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Optional[int] = spark.range(10 ).repartition(1 )
snake_case__ : Union[str, Any] = SparkExamplesIterable(snake_case_ )
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(snake_case_ ):
assert row_id == F'''0_{i}'''
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Optional[int] = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : str = spark.range(30 ).repartition(3 )
# Mock the generator so that shuffle reverses the partition indices.
with patch("numpy.random.Generator" ) as generator_mock:
snake_case__ : Union[str, Any] = lambda snake_case_ : x.reverse()
snake_case__ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [2, 1, 0] )
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shuffle_data_sources(snake_case_ )
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(20 ).repartition(4 )
# Partitions 0 and 2
snake_case__ : List[Any] = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=0 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[str] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [0, 2] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[int] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
snake_case__ : Any = SparkExamplesIterable(snake_case_ ).shard_data_sources(worker_id=1 , num_workers=2 )
assert shard_it_a.n_shards == 2
snake_case__ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(snake_case_ , [1, 3] )
for i, (row_id, row_dict) in enumerate(snake_case_ ):
snake_case__, snake_case__ : Optional[Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Dict = pyspark.sql.SparkSession.builder.master("local[*]" ).appName("pyspark" ).getOrCreate()
snake_case__ : Tuple = spark.range(100 ).repartition(1 )
snake_case__ : Union[str, Any] = Spark(snake_case_ )
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1 )
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
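# The arithmetic behind the partition assertions: ids are 8-byte int64s, so
# max_shard_size=16 fits 2 rows per partition (100 / 2 == 50 partitions) and
# max_shard_size=1 is capped at one row per partition (100 partitions).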
| 25 | 1 |
from collections.abc import Iterable
from typing import Any
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Optional[Any] , __A : int | None = None ):
snake_case__ : Dict = value
snake_case__ : Node | None = None # Added in order to delete a node easier
snake_case__ : Node | None = None
snake_case__ : Node | None = None
def __repr__( self : List[str] ):
from pprint import pformat
if self.left is None and self.right is None:
return str(self.value )
return pformat({f'''{self.value}''': (self.left, self.right)} , indent=1 )
class SCREAMING_SNAKE_CASE__ :
"""simple docstring"""
def __init__( self : Tuple , __A : Node | None = None ):
snake_case__ : Union[str, Any] = root
def __str__( self : Optional[int] ):
return str(self.root )
def _lowercase ( self : Dict , __A : Node , __A : Node | None ):
if new_children is not None: # reset its kids
snake_case__ : Any = node.parent
if node.parent is not None: # reset its parent
if self.is_right(__A ): # If it is the right children
snake_case__ : List[str] = new_children
else:
snake_case__ : Dict = new_children
else:
snake_case__ : Union[str, Any] = new_children
def _lowercase ( self : Tuple , __A : Node ):
if node.parent and node.parent.right:
return node == node.parent.right
return False
def _lowercase ( self : List[Any] ):
return self.root is None
def _lowercase ( self : Dict , __A : Optional[int] ):
snake_case__ : List[str] = Node(__A ) # create a new Node
if self.empty(): # if Tree is empty
snake_case__ : List[str] = new_node # set its root
else: # Tree is not empty
snake_case__ : Any = self.root # from root
if parent_node is None:
return
while True: # While we don't get to a leaf
if value < parent_node.value: # We go left
if parent_node.left is None:
snake_case__ : Optional[int] = new_node # We insert the new node in a leaf
break
else:
snake_case__ : int = parent_node.left
else:
if parent_node.right is None:
snake_case__ : Union[str, Any] = new_node
break
else:
snake_case__ : Dict = parent_node.right
snake_case__ : int = parent_node
def _lowercase ( self : Optional[Any] , *__A : Tuple ):
for value in values:
self.__insert(__A )
def _lowercase ( self : List[str] , __A : List[Any] ):
if self.empty():
raise IndexError("Warning: Tree is empty! please use another." )
else:
snake_case__ : str = self.root
# use lazy evaluation here to avoid NoneType Attribute error
while node is not None and node.value != value:
snake_case__ : List[Any] = node.left if value < node.value else node.right
return node
def _lowercase ( self : Dict , __A : Node | None = None ):
if node is None:
if self.root is None:
return None
snake_case__ : List[Any] = self.root
if not self.empty():
while node.right is not None:
snake_case__ : Any = node.right
return node
def _lowercase ( self : Union[str, Any] , __A : Node | None = None ):
if node is None:
snake_case__ : str = self.root
if self.root is None:
return None
if not self.empty():
snake_case__ : int = self.root
while node.left is not None:
snake_case__ : str = node.left
return node
def _lowercase ( self : Tuple , __A : int ):
snake_case__ : Tuple = self.search(__A ) # Look for the node with that label
if node is not None:
if node.left is None and node.right is None: # If it has no children
self.__reassign_nodes(__A , __A )
elif node.left is None: # Has only right children
self.__reassign_nodes(__A , node.right )
elif node.right is None: # Has only left children
self.__reassign_nodes(__A , node.left )
else:
snake_case__ : Any = self.get_max(
node.left ) # Gets the max value of the left branch
self.remove(tmp_node.value ) # type: ignore
snake_case__ : int = (
tmp_node.value # type: ignore
) # Assigns the value to the node to delete and keep tree structure
def _lowercase ( self : Dict , __A : Node | None ):
if node is not None:
yield node # Preorder Traversal
yield from self.preorder_traverse(node.left )
yield from self.preorder_traverse(node.right )
def _lowercase ( self : Optional[int] , __A : Optional[int]=None ):
if traversal_function is None:
return self.preorder_traverse(self.root )
else:
return traversal_function(self.root )
def _lowercase ( self : Optional[int] , __A : list , __A : Node | None ):
if node:
self.inorder(__A , node.left )
arr.append(node.value )
self.inorder(__A , node.right )
def _lowercase ( self : str , __A : int , __A : Node ):
snake_case__ : list[int] = []
self.inorder(__A , __A ) # append all values to list using inorder traversal
return arr[k - 1]
def SCREAMING_SNAKE_CASE ( snake_case_ : Node | None ):
snake_case__ : Union[str, Any] = []
if curr_node is not None:
snake_case__ : Union[str, Any] = postorder(curr_node.left ) + postorder(curr_node.right ) + [curr_node]
return node_list
def SCREAMING_SNAKE_CASE ( ):
snake_case__ : Any = (8, 3, 6, 1, 10, 14, 13, 4, 7)
snake_case__ : int = BinarySearchTree()
for i in testlist:
t.insert(snake_case_ )
# Prints all the elements of the list in order traversal
print(snake_case_ )
if t.search(6 ) is not None:
print("The value 6 exists" )
else:
print("The value 6 doesn't exist" )
if t.search(-1 ) is not None:
print("The value -1 exists" )
else:
print("The value -1 doesn't exist" )
if not t.empty():
print("Max Value: " , t.get_max().value ) # type: ignore
print("Min Value: " , t.get_min().value ) # type: ignore
for i in testlist:
t.remove(snake_case_ )
print(snake_case_ )
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
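# Editor's note: the placeholder identifiers above (SCREAMING_SNAKE_CASE__, _lowercase,
# snake_case__) obscure the control flow, so here is a minimal, de-obfuscated sketch of
# the core insert logic this class implements (structure inferred from the code above):
class _SketchNode:
    def __init__(self, value):
        self.value, self.left, self.right, self.parent = value, None, None, None

def sketch_insert(root, value):
    if root is None:
        return _SketchNode(value)
    node = root
    while True:  # walk down until a leaf slot opens on the ordered side
        side = "left" if value < node.value else "right"
        child = getattr(node, side)
        if child is None:
            new_node = _SketchNode(value)
            new_node.parent = node
            setattr(node, side, new_node)
            return root
        node = child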
| 25 |
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__lowerCamelCase : List[str] = {"""configuration_xlnet""": ["""XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP""", """XLNetConfig"""]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = ["""XLNetTokenizer"""]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = ["""XLNetTokenizerFast"""]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : str = [
"""XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""XLNetForMultipleChoice""",
"""XLNetForQuestionAnswering""",
"""XLNetForQuestionAnsweringSimple""",
"""XLNetForSequenceClassification""",
"""XLNetForTokenClassification""",
"""XLNetLMHeadModel""",
"""XLNetModel""",
"""XLNetPreTrainedModel""",
"""load_tf_weights_in_xlnet""",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : Dict = [
"""TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""TFXLNetForMultipleChoice""",
"""TFXLNetForQuestionAnsweringSimple""",
"""TFXLNetForSequenceClassification""",
"""TFXLNetForTokenClassification""",
"""TFXLNetLMHeadModel""",
"""TFXLNetMainLayer""",
"""TFXLNetModel""",
"""TFXLNetPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_xlnet import XLNET_PRETRAINED_CONFIG_ARCHIVE_MAP, XLNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet import XLNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xlnet_fast import XLNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xlnet import (
XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
XLNetForMultipleChoice,
XLNetForQuestionAnswering,
XLNetForQuestionAnsweringSimple,
XLNetForSequenceClassification,
XLNetForTokenClassification,
XLNetLMHeadModel,
XLNetModel,
XLNetPreTrainedModel,
load_tf_weights_in_xlnet,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xlnet import (
TF_XLNET_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXLNetForMultipleChoice,
TFXLNetForQuestionAnsweringSimple,
TFXLNetForSequenceClassification,
TFXLNetForTokenClassification,
TFXLNetLMHeadModel,
TFXLNetMainLayer,
TFXLNetModel,
TFXLNetPreTrainedModel,
)
else:
import sys
__lowerCamelCase : int = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
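# Editor's note: a minimal sketch of the lazy-import pattern this file uses. _LazyModule
# defers the heavy submodule imports until an attribute is first accessed; a stripped-down
# equivalent using module-level __getattr__ (PEP 562) would look roughly like this:
import importlib

_import_structure = {"modeling_xlnet": ["XLNetModel"]}
_attr_to_module = {attr: mod for mod, attrs in _import_structure.items() for attr in attrs}

def __getattr__(name):
    # Runs only on the first access, e.g. `from <package> import XLNetModel`.
    module_name = _attr_to_module.get(name)
    if module_name is None:
        raise AttributeError(name)
    return getattr(importlib.import_module("." + module_name, __name__), name)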
| 25 | 1 |
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
__lowerCamelCase : Optional[Any] = {
"""configuration_lilt""": ["""LILT_PRETRAINED_CONFIG_ARCHIVE_MAP""", """LiltConfig"""],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__lowerCamelCase : int = [
"""LILT_PRETRAINED_MODEL_ARCHIVE_LIST""",
"""LiltForQuestionAnswering""",
"""LiltForSequenceClassification""",
"""LiltForTokenClassification""",
"""LiltModel""",
"""LiltPreTrainedModel""",
]
if TYPE_CHECKING:
from .configuration_lilt import LILT_PRETRAINED_CONFIG_ARCHIVE_MAP, LiltConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lilt import (
LILT_PRETRAINED_MODEL_ARCHIVE_LIST,
LiltForQuestionAnswering,
LiltForSequenceClassification,
LiltForTokenClassification,
LiltModel,
LiltPreTrainedModel,
)
else:
import sys
__lowerCamelCase : Optional[int] = _LazyModule(__name__, globals()["""__file__"""], _import_structure, module_spec=__spec__)
| 25 |
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import KandinskyPipeline, KandinskyPriorPipeline
else:
from .pipeline_kandinsky import KandinskyPipeline
from .pipeline_kandinsky_img2img import KandinskyImg2ImgPipeline
from .pipeline_kandinsky_inpaint import KandinskyInpaintPipeline
from .pipeline_kandinsky_prior import KandinskyPriorPipeline, KandinskyPriorPipelineOutput
from .text_encoder import MultilingualCLIP
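# Editor's note: a hedged usage sketch of the two-stage Kandinsky flow these exports
# support. The checkpoint names are assumptions taken from the public Hub, not from
# this file:
from diffusers import KandinskyPipeline, KandinskyPriorPipeline

prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/kandinsky-2-1-prior")
image_embeds, negative_image_embeds = prior("a photo of a red fox").to_tuple()

pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")
image = pipe(
    "a photo of a red fox",
    image_embeds=image_embeds,
    negative_image_embeds=negative_image_embeds,
).images[0]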
| 25 | 1 |