code | code_codestyle | style_context | style_context_codestyle | label
---|---|---|---|---
stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1
"""simple docstring"""
import unittest
import numpy as np
import torch
from .utils_summarization import build_mask, compute_token_type_ids, process_story, truncate_or_pad
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = 10
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
A = [1, 2, 3, 4]
A = [1, 2, 3, 4, 0, 0, 0, 0, 0, 0]
self.assertEqual(truncate_or_pad(lowerCAmelCase__ ,self.block_size ,0 ) ,lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
A = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
A = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase__ ,self.block_size ,0 ) ,lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
A = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]
A = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
self.assertEqual(truncate_or_pad(lowerCAmelCase__ ,self.block_size ,0 ) ,lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A = 'It was the year of Our Lord one thousand seven hundred and\n seventy-five.\n\nSpiritual revelations were conceded to England at that\n favoured period, as at this.'
A , A = process_story(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,[] )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
A = ''
A , A = process_story(lowerCAmelCase__ )
self.assertEqual(lowerCAmelCase__ ,[] )
self.assertEqual(lowerCAmelCase__ ,[] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
A = (
'It was the year of Our Lord one thousand seven hundred and '
'seventy-five\n\nSpiritual revelations were conceded to England '
'at that favoured period, as at this.\n@highlight\n\nIt was the best of times'
)
A , A = process_story(lowerCAmelCase__ )
A = [
'It was the year of Our Lord one thousand seven hundred and seventy-five.',
'Spiritual revelations were conceded to England at that favoured period, as at this.',
]
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
A = ['It was the best of times.']
self.assertEqual(lowerCAmelCase__ ,lowerCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = torch.tensor([1, 2, 3, 4] )
A = torch.tensor([1, 1, 1, 1] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase__ ,0 ).numpy() ,expected.numpy() )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = torch.tensor([1, 2, 3, 4, 23, 23, 23] )
A = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase__ ,23 ).numpy() ,expected.numpy() )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
A = torch.tensor([8, 2, 3, 4, 1, 1, 1] )
A = torch.tensor([1, 1, 1, 1, 0, 0, 0] )
np.testing.assert_array_equal(build_mask(lowerCAmelCase__ ,1 ).numpy() ,expected.numpy() )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
A = 101
A = torch.tensor([[1, 2, 3, 4, 5, 6], [1, 2, 3, 101, 5, 6], [1, 101, 3, 4, 101, 6]] )
A = torch.tensor([[1, 1, 1, 1, 1, 1], [1, 1, 1, 0, 0, 0], [1, 0, 0, 0, 1, 1]] )
A = compute_token_type_ids(lowerCAmelCase__ ,lowerCAmelCase__ )
np.testing.assert_array_equal(lowerCAmelCase__ ,lowerCAmelCase__ ) | 701 |
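# For reference, a minimal sketch of implementations consistent with the assertions
# above (an assumption, not the actual utils_summarization module, which also handles
# CNN/DailyMail story parsing). Kept in comments so it does not shadow the imports:
#
#     def truncate_or_pad(sequence, block_size, pad_token_id):
#         if len(sequence) > block_size:
#             return sequence[:block_size]
#         return sequence + [pad_token_id] * (block_size - len(sequence))
#
#     def build_mask(sequence, pad_token_id):
#         mask = torch.ones_like(sequence)
#         mask[sequence == pad_token_id] = 0
#         return mask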
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowercase = 8
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ):
A = x.device
A = (x * 255).int().clamp(0 , 255 )
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' )
A = ((x & mask) != 0).float()
A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' )
A = bits * 2 - 1
return bits
def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ):
A = x.device
A = (x > 0).int()
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 )
A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
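
# Quick sanity check for the two converters above: encoding to bits and decoding
# back reproduces the input exactly up to 8-bit quantization. This demo function
# is an illustration added here, not part of the original pipeline file.
def _bit_round_trip_demo():
    image = torch.rand(1, 3, 16, 16)  # fake image batch in [0, 1]
    bits = decimal_to_bits(image)  # shape (1, 3 * 8, 16, 16), values in {-1, 1}
    restored = bits_to_decimal(bits)  # back to [0, 1]
    quantized = (image * 255).int().clamp(0, 255) / 255  # 8-bit truncation of the input
    assert torch.allclose(quantized, restored, atol=1e-6)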
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read the DDIM paper for an in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # monkey-patch the scheduler with the bit-aware step function defined above
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        )

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
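
# Usage sketch for the pipeline above, assuming a UNet trained on bit-encoded
# images (24 input/output channels for RGB x 8 bits). The checkpoint path is
# hypothetical; this is illustration, not a tested recipe:
#
#     unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")
#     scheduler = DDIMScheduler()
#     pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0)
#     images = pipe(height=256, width=256, num_inference_steps=50).images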
"""simple docstring"""
import json
import os
import re
import unicodedata
from json.encoder import INFINITY
from typing import Any, Dict, List, Optional, Tuple, Union
import numpy as np
import regex
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, is_flax_available, is_tf_available, is_torch_available, logging
from ...utils.generic import _is_jax, _is_numpy
_lowercase = logging.get_logger(__name__)
_lowercase = {
'artists_file': 'artists.json',
'lyrics_file': 'lyrics.json',
'genres_file': 'genres.json',
}
_lowercase = {
'artists_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/artists.json',
},
'genres_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/genres.json',
},
'lyrics_file': {
'jukebox': 'https://huggingface.co/ArthurZ/jukebox/blob/main/lyrics.json',
},
}
_lowercase = {
'jukebox': 5_12,
}
class lowerCAmelCase_ ( snake_case__ ):
'''simple docstring'''
_lowerCamelCase: int = VOCAB_FILES_NAMES
_lowerCamelCase: Tuple = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: int = PRETRAINED_LYRIC_TOKENS_SIZES
_lowerCamelCase: str = ["""input_ids""", """attention_mask"""]
def __init__( self : Optional[Any] ,A_ : Dict ,A_ : Optional[Any] ,A_ : str ,A_ : Optional[Any]=["v3", "v2", "v2"] ,A_ : Union[str, Any]=512 ,A_ : Dict=5 ,A_ : List[str]="<|endoftext|>" ,**A_ : List[str] ,) -> Any:
A = AddedToken(UpperCAmelCase_ ,lstrip=UpperCAmelCase_ ,rstrip=UpperCAmelCase_ ) if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ) else unk_token
super().__init__(
unk_token=UpperCAmelCase_ ,n_genres=UpperCAmelCase_ ,version=UpperCAmelCase_ ,max_n_lyric_tokens=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
A = version
A = max_n_lyric_tokens
A = n_genres
with open(UpperCAmelCase_ ,encoding='utf-8' ) as vocab_handle:
A = json.load(UpperCAmelCase_ )
with open(UpperCAmelCase_ ,encoding='utf-8' ) as vocab_handle:
A = json.load(UpperCAmelCase_ )
with open(UpperCAmelCase_ ,encoding='utf-8' ) as vocab_handle:
A = json.load(UpperCAmelCase_ )
A = R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+'
# In v2, we had a n_vocab=80 and in v3 we missed + and so n_vocab=79 of characters.
if len(self.lyrics_encoder ) == 79:
A = oov.replace(R'\-\'' ,R'\-+\'' )
A = regex.compile(UpperCAmelCase_ )
A = {v: k for k, v in self.artists_encoder.items()}
A = {v: k for k, v in self.genres_encoder.items()}
A = {v: k for k, v in self.lyrics_encoder.items()}
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return len(self.artists_encoder ) + len(self.genres_encoder ) + len(self.lyrics_encoder )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
return dict(self.artists_encoder ,self.genres_encoder ,self.lyrics_encoder )
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple ,A_ : str ,A_ : List[str] ) -> int:
A = [self.artists_encoder.get(UpperCAmelCase_ ,0 ) for artist in list_artists]
for genres in range(len(UpperCAmelCase_ ) ):
A = [self.genres_encoder.get(UpperCAmelCase_ ,0 ) for genre in list_genres[genres]]
A = list_genres[genres] + [-1] * (self.n_genres - len(list_genres[genres] ))
A = [[self.lyrics_encoder.get(UpperCAmelCase_ ,0 ) for character in list_lyrics[0]], [], []]
return artists_id, list_genres, lyric_ids
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : List[str] ) -> Any:
return list(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ,A_ : int ,A_ : List[Any] ,**A_ : Optional[Any] ) -> List[str]:
A , A , A = self.prepare_for_tokenization(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
A = self._tokenize(UpperCAmelCase_ )
return artist, genre, lyrics
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ,A_ : str ,A_ : str ,A_ : bool = False ) -> Tuple[str, str, str, Dict[str, Any]]:
for idx in range(len(self.version ) ):
if self.version[idx] == "v3":
A = artists[idx].lower()
A = [genres[idx].lower()]
else:
A = self._normalize(artists[idx] ) + '.v2'
A = [
self._normalize(UpperCAmelCase_ ) + '.v2' for genre in genres[idx].split('_' )
] # split is for the full dictionary with combined genres
if self.version[0] == "v2":
A = regex.compile(R'[^A-Za-z0-9.,:;!?\-\'\"()\[\] \t\n]+' )
A = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789.,:;!?-+\'\"()[] \t\n'
A = {vocab[index]: index + 1 for index in range(len(UpperCAmelCase_ ) )}
A = 0
A = len(UpperCAmelCase_ ) + 1
A = self.vocab
A = {v: k for k, v in self.vocab.items()}
A = ''
else:
A = regex.compile(R'[^A-Za-z0-9.,:;!?\-+\'\"()\[\] \t\n]+' )
A = self._run_strip_accents(UpperCAmelCase_ )
A = lyrics.replace('\\' ,'\n' )
A = self.out_of_vocab.sub('' ,UpperCAmelCase_ ), [], []
return artists, genres, lyrics
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[str] ) -> Tuple:
A = unicodedata.normalize('NFD' ,UpperCAmelCase_ )
A = []
for char in text:
A = unicodedata.category(UpperCAmelCase_ )
if cat == "Mn":
continue
output.append(UpperCAmelCase_ )
return "".join(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : str ) -> str:
A = (
[chr(UpperCAmelCase_ ) for i in range(ord('a' ) ,ord('z' ) + 1 )]
+ [chr(UpperCAmelCase_ ) for i in range(ord('A' ) ,ord('Z' ) + 1 )]
+ [chr(UpperCAmelCase_ ) for i in range(ord('0' ) ,ord('9' ) + 1 )]
+ ['.']
)
A = frozenset(UpperCAmelCase_ )
A = re.compile(R'_+' )
A = ''.join([c if c in accepted else '_' for c in text.lower()] )
A = pattern.sub('_' ,UpperCAmelCase_ ).strip('_' )
return text
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str] ) -> str:
return " ".join(UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ,A_ : Optional[Union[str, TensorType]] = None ,A_ : bool = False ) -> Union[str, Any]:
if not isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
A = TensorType(UpperCAmelCase_ )
# Get a function reference for the correct framework
if tensor_type == TensorType.TENSORFLOW:
if not is_tf_available():
raise ImportError(
'Unable to convert output to TensorFlow tensors format, TensorFlow is not installed.' )
import tensorflow as tf
A = tf.constant
A = tf.is_tensor
elif tensor_type == TensorType.PYTORCH:
if not is_torch_available():
raise ImportError('Unable to convert output to PyTorch tensors format, PyTorch is not installed.' )
import torch
A = torch.tensor
A = torch.is_tensor
elif tensor_type == TensorType.JAX:
if not is_flax_available():
raise ImportError('Unable to convert output to JAX tensors format, JAX is not installed.' )
import jax.numpy as jnp # noqa: F811
A = jnp.array
A = _is_jax
else:
A = np.asarray
A = _is_numpy
# Do the tensor conversion in batch
try:
if prepend_batch_axis:
A = [inputs]
if not is_tensor(UpperCAmelCase_ ):
A = as_tensor(UpperCAmelCase_ )
except: # noqa E722
raise ValueError(
'Unable to create tensor, you should probably activate truncation and/or padding '
'with \'padding=True\' \'truncation=True\' to have batched tensors with the same length.' )
return inputs
def __call__( self : Any ,A_ : List[str] ,A_ : List[str] ,A_ : List[Any]="" ,A_ : List[Any]="pt" ) -> BatchEncoding:
A = [0, 0, 0]
A = [artist] * len(self.version )
A = [genres] * len(self.version )
A , A , A = self.tokenize(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
A , A , A = self._convert_token_to_id(UpperCAmelCase_ ,UpperCAmelCase_ ,UpperCAmelCase_ )
A = [-INFINITY] * len(full_tokens[-1] )
A = [
self.convert_to_tensors(
[input_ids + [artists_id[i]] + genres_ids[i] + full_tokens[i]] ,tensor_type=UpperCAmelCase_ )
for i in range(len(self.version ) )
]
return BatchEncoding({'input_ids': input_ids, 'attention_masks': attention_masks} )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
UpperCAmelCase_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['artists_file'] )
with open(UpperCAmelCase_ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.artists_encoder ,ensure_ascii=UpperCAmelCase_ ) )
A = os.path.join(
UpperCAmelCase_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['genres_file'] )
with open(UpperCAmelCase_ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.genres_encoder ,ensure_ascii=UpperCAmelCase_ ) )
A = os.path.join(
UpperCAmelCase_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['lyrics_file'] )
with open(UpperCAmelCase_ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.lyrics_encoder ,ensure_ascii=UpperCAmelCase_ ) )
return (artists_file, genres_file, lyrics_file)
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Dict ,A_ : List[Any] ) -> Union[str, Any]:
A = self.artists_decoder.get(UpperCAmelCase_ )
A = [self.genres_decoder.get(UpperCAmelCase_ ) for genre in genres_index]
A = [self.lyrics_decoder.get(UpperCAmelCase_ ) for character in lyric_index]
return artist, genres, lyrics | 702 |
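
# Usage sketch for the tokenizer above (the checkpoint name is an assumption; any
# repo hosting artists.json/genres.json/lyrics.json in this format would work). The
# three leading zeros in each returned sequence are metadata placeholders that the
# model fills in later:
#
#     tokenizer = JukeboxTokenizer.from_pretrained("openai/jukebox-1b-lyrics")
#     encoding = tokenizer("Alan Jackson", "Country Rock", "old town road")
#     # encoding["input_ids"] -> one tensor per prior (v3, v2, v2)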
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''yolos'''
def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return 12 | 22 | 0 |
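
# Usage sketch (hedged): load the released config and inspect the ONNX export
# description derived from it:
#
#     config = YolosConfig.from_pretrained("hustvl/yolos-small")
#     onnx_config = YolosOnnxConfig(config)
#     print(onnx_config.inputs)              # OrderedDict([('pixel_values', {0: 'batch', ...})])
#     print(onnx_config.default_onnx_opset)  # 12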
"""simple docstring"""
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DeformableDetrImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int ,A_ : Dict ,A_ : Union[str, Any]=7 ,A_ : List[Any]=3 ,A_ : Any=30 ,A_ : Tuple=400 ,A_ : int=True ,A_ : str=None ,A_ : Any=True ,A_ : int=[0.5, 0.5, 0.5] ,A_ : Any=[0.5, 0.5, 0.5] ,A_ : int=True ,A_ : Any=1 / 255 ,A_ : List[str]=True ,) -> int:
A = size if size is not None else {'shortest_edge': 18, 'longest_edge': 1333}
A = parent
A = batch_size
A = num_channels
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
A = image_mean
A = image_std
A = do_rescale
A = rescale_factor
A = do_pad
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_pad": self.do_pad,
}
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Tuple ,A_ : Tuple=False ) -> str:
if not batched:
A = image_inputs[0]
if isinstance(A_ ,Image.Image ):
A , A = image.size
else:
A , A = image.shape[1], image.shape[2]
if w < h:
A = int(self.size['shortest_edge'] * h / w )
A = self.size['shortest_edge']
elif w > h:
A = self.size['shortest_edge']
A = int(self.size['shortest_edge'] * w / h )
else:
A = self.size['shortest_edge']
A = self.size['shortest_edge']
else:
A = []
for image in image_inputs:
A , A = self.get_expected_values([image] )
expected_values.append((expected_height, expected_width) )
A = max(A_ ,key=lambda A_ : item[0] )[0]
A = max(A_ ,key=lambda A_ : item[1] )[1]
return expected_height, expected_width
@require_torch
@require_vision
class lowerCAmelCase_ ( _SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = DeformableDetrImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
A = DeformableDetrImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'image_mean' ) )
self.assertTrue(hasattr(A_ ,'image_std' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'do_rescale' ) )
self.assertTrue(hasattr(A_ ,'do_pad' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'shortest_edge': 18, 'longest_edge': 1333} )
self.assertEqual(image_processor.do_pad ,A_ )
A = self.image_processing_class.from_dict(
self.image_processor_dict ,size=42 ,max_size=84 ,pad_and_return_pixel_mask=A_ )
self.assertEqual(image_processor.size ,{'shortest_edge': 42, 'longest_edge': 84} )
self.assertEqual(image_processor.do_pad ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,Image.Image )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
A , A = self.image_processor_tester.get_expected_values(A_ ,batched=A_ )
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def _SCREAMING_SNAKE_CASE ( self : str ) -> Any:
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ ,numpify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,np.ndarray )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(A_ ,batched=A_ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=A_ ,torchify=A_ )
for image in image_inputs:
self.assertIsInstance(A_ ,torch.Tensor )
# Test not batched input
A = image_processing(image_inputs[0] ,return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(A_ )
self.assertEqual(
encoded_images.shape ,(1, self.image_processor_tester.num_channels, expected_height, expected_width) ,)
# Test batched
A = image_processing(A_ ,return_tensors='pt' ).pixel_values
A , A = self.image_processor_tester.get_expected_values(A_ ,batched=A_ )
self.assertEqual(
encoded_images.shape ,(
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) ,)
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_annotations.txt' ,'r' ) as f:
A = json.loads(f.read() )
A = {'image_id': 3_9769, 'annotations': target}
# encode them
A = DeformableDetrImageProcessor()
A = image_processing(images=A_ ,annotations=A_ ,return_tensors='pt' )
# verify pixel values
A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,A_ )
A = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,A_ ,atol=1e-4 ) )
# verify area
A = torch.tensor([58_87.96_00, 1_1250.2061, 48_9353.8438, 83_7122.7500, 14_7967.5156, 16_5732.3438] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,A_ ) )
# verify boxes
A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,A_ )
A = torch.tensor([0.55_03, 0.27_65, 0.06_04, 0.22_15] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,A_ ,atol=1e-3 ) )
# verify image_id
A = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,A_ ) )
# verify is_crowd
A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,A_ ) )
# verify class_labels
A = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,A_ ) )
# verify orig_size
A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,A_ ) )
# verify size
A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,A_ ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
with open('./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt' ,'r' ) as f:
A = json.loads(f.read() )
A = {'file_name': '000000039769.png', 'image_id': 3_9769, 'segments_info': target}
A = pathlib.Path('./tests/fixtures/tests_samples/COCO/coco_panoptic' )
# encode them
A = DeformableDetrImageProcessor(format='coco_panoptic' )
A = image_processing(images=A_ ,annotations=A_ ,masks_path=A_ ,return_tensors='pt' )
# verify pixel values
A = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding['pixel_values'].shape ,A_ )
A = torch.tensor([0.27_96, 0.31_38, 0.34_81] )
self.assertTrue(torch.allclose(encoding['pixel_values'][0, 0, 0, :3] ,A_ ,atol=1e-4 ) )
# verify area
A = torch.tensor([14_7979.6875, 16_5527.0469, 48_4638.5938, 1_1292.9375, 58_79.65_62, 76_34.11_47] )
self.assertTrue(torch.allclose(encoding['labels'][0]['area'] ,A_ ) )
# verify boxes
A = torch.Size([6, 4] )
self.assertEqual(encoding['labels'][0]['boxes'].shape ,A_ )
A = torch.tensor([0.26_25, 0.54_37, 0.46_88, 0.86_25] )
self.assertTrue(torch.allclose(encoding['labels'][0]['boxes'][0] ,A_ ,atol=1e-3 ) )
# verify image_id
A = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding['labels'][0]['image_id'] ,A_ ) )
# verify is_crowd
A = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding['labels'][0]['iscrowd'] ,A_ ) )
# verify class_labels
A = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding['labels'][0]['class_labels'] ,A_ ) )
# verify masks
A = 82_2873
self.assertEqual(encoding['labels'][0]['masks'].sum().item() ,A_ )
# verify orig_size
A = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding['labels'][0]['orig_size'] ,A_ ) )
# verify size
A = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding['labels'][0]['size'] ,A_ ) )
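
# Standalone usage sketch outside the test harness (hedged; "SenseTime/deformable-detr"
# is the public release, but any Deformable DETR checkpoint works):
#
#     processor = DeformableDetrImageProcessor.from_pretrained("SenseTime/deformable-detr")
#     inputs = processor(images=image, return_tensors="pt")  # resizes, rescales, normalizes, pads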
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 22 | 0 |
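
# Standalone usage sketch mirroring the integration test above (same checkpoint and
# revision as the test; swap the provider for "CUDAExecutionProvider" on GPU):
#
#     pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
#         "CompVis/stable-diffusion-v1-4", revision="onnx", provider="CPUExecutionProvider"
#     )
#     out = pipe(prompt="A fantasy landscape", image=init_image, strength=0.75, num_inference_steps=20)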
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_lowercase = {
'''configuration_falcon''': ['''FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FalconConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FALCON_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FalconForCausalLM''',
'''FalconModel''',
'''FalconPreTrainedModel''',
'''FalconForSequenceClassification''',
'''FalconForTokenClassification''',
'''FalconForQuestionAnswering''',
]
if TYPE_CHECKING:
from .configuration_falcon import FALCON_PRETRAINED_CONFIG_ARCHIVE_MAP, FalconConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_falcon import (
FALCON_PRETRAINED_MODEL_ARCHIVE_LIST,
FalconForCausalLM,
FalconForQuestionAnswering,
FalconForSequenceClassification,
FalconForTokenClassification,
FalconModel,
FalconPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 704 |
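
# What the lazy module buys (a sketch): `import transformers.models.falcon` is cheap;
# the real submodule import only happens on first attribute access:
#
#     import transformers.models.falcon as falcon
#     config = falcon.FalconConfig()       # now configuration_falcon is actually imported
#     model = falcon.FalconModel(config)   # now modeling_falcon (and torch) are loaded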
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 | 0 |
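
# Post-processing sketch: turning model logits into per-pixel class maps with the
# method above (names hedged; any semantic-segmentation head producing logits of
# shape (batch, num_labels, h, w) fits):
#
#     outputs = model(**inputs)
#     maps = image_processor.post_process_semantic_segmentation(outputs, target_sizes=[(480, 640)])
#     # maps[0] is a (480, 640) LongTensor of class indices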
"""simple docstring"""
import random
import unittest
import numpy as np
import torch
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionUpscalePipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = '''ssube/stable-diffusion-x4-upscaler-onnx'''
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Dict=0 ) -> Tuple:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(__lowerCamelCase ) )
A = torch.manual_seed(__lowerCamelCase )
A = {
"prompt": "A painting of a squirrel eating a burger",
"image": image,
"generator": generator,
"num_inference_steps": 3,
"guidance_scale": 7.5,
"output_type": "numpy",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__lowerCamelCase ).images
A = image[0, -3:, -3:, -1].flatten()
# started as 128, should now be 512
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=__lowerCamelCase )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__lowerCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.6_89_88_92, 0.59_24_05_56, 0.52_49_95_27, 0.58_86_62_15, 0.52_25_82_35, 0.52_57_27_15, 0.62_41_44_73, 0.6_17_43_87, 0.6_21_49_64] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__lowerCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.7_65_92_78, 0.76_43_76_64, 0.75_57_91_07, 0.7_69_11_16, 0.77_66_69_86, 0.7_72_76_72, 0.7_75_86_64, 0.7_81_22_26, 0.76_94_25_15] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__lowerCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.6_97_47_82, 0.68_90_20_93, 0.70_13_58_85, 0.7_58_36_18, 0.7_80_45_45, 0.7_85_49_12, 0.78_66_74_26, 0.78_74_38_63, 0.78_07_02_23] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A = self.get_dummy_inputs()
A = pipe(**__lowerCamelCase ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 512, 512, 3)
A = np.array(
[0.77_42_44_96, 0.77_36_01, 0.7_64_52_88, 0.7_76_95_98, 0.7_77_27_39, 0.7_73_86_88, 0.78_18_72_33, 0.77_87_95_84, 0.76_70_43] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((128, 128) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A = "A fantasy landscape, trending on artstation"
A = torch.manual_seed(0 )
A = pipe(
prompt=__lowerCamelCase ,image=__lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=__lowerCamelCase ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A = np.array([0.48_83, 0.49_47, 0.49_80, 0.49_75, 0.49_82, 0.49_80, 0.50_00, 0.50_06, 0.49_72] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((128, 128) )
A = LMSDiscreteScheduler.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' ,subfolder='scheduler' )
A = OnnxStableDiffusionUpscalePipeline.from_pretrained(
'ssube/stable-diffusion-x4-upscaler-onnx' ,scheduler=__lowerCamelCase ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=__lowerCamelCase )
A = "A fantasy landscape, trending on artstation"
A = torch.manual_seed(0 )
A = pipe(
prompt=__lowerCamelCase ,image=__lowerCamelCase ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=__lowerCamelCase ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 512, 3)
A = np.array(
[0.50_17_37_53, 0.50_22_33_56, 0.50_20_39, 0.50_23_30_36, 0.5_02_37_25, 0.5_02_26_01, 0.5_01_87_58, 0.50_23_40_85, 0.50_24_15_66] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 705 |
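
# Standalone usage sketch for the x4 upscaler exercised above (128x128 in,
# 512x512 out; same public checkpoint as the tests):
#
#     pipe = OnnxStableDiffusionUpscalePipeline.from_pretrained(
#         "ssube/stable-diffusion-x4-upscaler-onnx", provider="CPUExecutionProvider"
#     )
#     upscaled = pipe(prompt="a photo of a castle", image=low_res_image).images[0]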
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
A = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
A = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
A = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
A = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A = os.path.abspath(snake_case__ )
A = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A = TransfoXLConfig()
else:
A = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TransfoXLLMHeadModel(snake_case__ )
A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
A = os.path.join(snake_case__ , snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 0 |
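
# Example invocation of the script above (the paths are hypothetical):
#
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./transfo-xl-pt \
#         --tf_checkpoint_path ./transfo_xl_model.ckpt \
#         --transfo_xl_config_file ./transfo_xl_config.json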
"""simple docstring"""
import os
import warnings
from typing import List, Optional
from ...tokenization_utils_base import BatchEncoding
from ...utils import logging
from .configuration_rag import RagConfig
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[Any] ,A_ : int ) -> List[str]:
A = question_encoder
A = generator
A = self.question_encoder
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Any ) -> Optional[Any]:
if os.path.isfile(UpperCAmelCase_ ):
raise ValueError(F'Provided path ({save_directory}) should be a directory, not a file' )
os.makedirs(UpperCAmelCase_ ,exist_ok=UpperCAmelCase_ )
A = os.path.join(UpperCAmelCase_ ,'question_encoder_tokenizer' )
A = os.path.join(UpperCAmelCase_ ,'generator_tokenizer' )
self.question_encoder.save_pretrained(UpperCAmelCase_ )
self.generator.save_pretrained(UpperCAmelCase_ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : str ,A_ : Any ,**A_ : List[Any] ) -> List[Any]:
# dynamically import AutoTokenizer
from ..auto.tokenization_auto import AutoTokenizer
A = kwargs.pop('config' ,UpperCAmelCase_ )
if config is None:
A = RagConfig.from_pretrained(UpperCAmelCase_ )
A = AutoTokenizer.from_pretrained(
UpperCAmelCase_ ,config=config.question_encoder ,subfolder='question_encoder_tokenizer' )
A = AutoTokenizer.from_pretrained(
UpperCAmelCase_ ,config=config.generator ,subfolder='generator_tokenizer' )
return cls(question_encoder=UpperCAmelCase_ ,generator=UpperCAmelCase_ )
def __call__( self : str ,*A_ : Optional[Any] ,**A_ : Any ) -> Tuple:
return self.current_tokenizer(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : str ,*A_ : List[Any] ,**A_ : str ) -> List[str]:
return self.generator.batch_decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,*A_ : Optional[Any] ,**A_ : Dict ) -> Union[str, Any]:
return self.generator.decode(*UpperCAmelCase_ ,**UpperCAmelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.question_encoder
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = self.generator
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[str] ,A_ : Optional[List[str]] = None ,A_ : Optional[int] = None ,A_ : Optional[int] = None ,A_ : str = "longest" ,A_ : str = None ,A_ : bool = True ,**A_ : Tuple ,) -> Any:
warnings.warn(
'`prepare_seq2seq_batch` is deprecated and will be removed in version 5 of 🤗 Transformers. Use the '
'regular `__call__` method to prepare your inputs and the tokenizer under the `with_target_tokenizer` '
'context manager to prepare your targets. See the documentation of your specific tokenizer for more '
'details' ,UpperCAmelCase_ ,)
if max_length is None:
A = self.current_tokenizer.model_max_length
A = self(
UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
if tgt_texts is None:
return model_inputs
# Process tgt_texts
if max_target_length is None:
A = self.current_tokenizer.model_max_length
A = self(
text_target=UpperCAmelCase_ ,add_special_tokens=UpperCAmelCase_ ,return_tensors=UpperCAmelCase_ ,padding=UpperCAmelCase_ ,max_length=UpperCAmelCase_ ,truncation=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
A = labels['input_ids']
return model_inputs
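# The class above multiplexes two tokenizers behind a single callable: __call__
# forwards to whichever object `current_tokenizer` points at, and the two
# zero-argument methods flip it between question encoder and generator. A
# minimal standalone sketch of that dispatch (illustrative, not the RAG class):
class _DualTokenizerSketch:
    def __init__(self, question_encoder, generator):
        self.question_encoder = question_encoder
        self.generator = generator
        self.current_tokenizer = question_encoder  # default: encode questions
    def __call__(self, *args, **kwargs):
        return self.current_tokenizer(*args, **kwargs)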
| 706 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
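# Minimal standalone restatement of the iterative DFS pattern used by the
# `dfs`/cycle methods above (explicit stack plus visited list), on a plain
# adjacency mapping rather than the [weight, vertex] pairs stored here:
def _dfs_sketch(adj, start):
    stack, visited = [start], [start]
    while stack:
        node = stack.pop()
        for nxt in adj.get(node, []):
            if nxt not in visited:
                visited.append(nxt)
                stack.append(nxt)
    return visited
assert _dfs_sketch({1: [2, 3], 2: [4], 3: [], 4: []}, 1) == [1, 2, 3, 4]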
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
        # check if u exists
if self.graph.get(A_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
# check if there is any non isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
return end - begin | 22 | 0 |
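# Minimal standalone restatement of the deque-based BFS both classes implement
# above, on a plain adjacency mapping (illustrative; `deque` is imported at the
# top of this file):
def _bfs_sketch(adj, start):
    queue, visited = deque([start]), [start]
    while queue:
        node = queue.popleft()
        for nxt in adj.get(node, []):
            if nxt not in visited:
                visited.append(nxt)
                queue.append(nxt)
    return visited
assert _bfs_sketch({1: [2, 3], 2: [4], 3: [], 4: []}, 1) == [1, 2, 3, 4]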
"""simple docstring"""
from __future__ import annotations
def _snake_case ( snake_case__ : list[float] , snake_case__ : list[float] ):
A = sorted(numsa + numsa )
A = divmod(len(snake_case__ ) , 2 )
if mod == 1:
return all_numbers[div]
else:
return (all_numbers[div] + all_numbers[div - 1]) / 2
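# Self-contained check of the even/odd split above (illustrative values):
# merging [1, 3] and [2, 4] sorts to [1, 2, 3, 4]; divmod(4, 2) == (2, 0),
# so the median averages the elements at indices 1 and 2.
_merged = sorted([1, 3] + [2, 4])
_div, _mod = divmod(len(_merged), 2)
assert (_merged[_div] if _mod else (_merged[_div] + _merged[_div - 1]) / 2) == 2.5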
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = [float(x) for x in input('''Enter the elements of first array: ''').split()]
_lowercase = [float(x) for x in input('''Enter the elements of second array: ''').split()]
print(f"""The median of two arrays is: {median_of_two_arrays(array_a, array_a)}""") | 707 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
A = F'{olid} is not a valid Open Library olid'
raise ValueError(snake_case__ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
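# Offline sketch (hypothetical data, no network) of the key-renaming step the
# function above performs via `desired_keys`:
_raw = {"title": "Matilda", "publish_date": "1988", "extra_field": "dropped"}
_keys = {"title": "Title", "publish_date": "Publish date"}
assert {v: _raw[k] for k, v in _keys.items()} == {"Title": "Matilda", "Publish date": "1988"}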
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 0 |
"""simple docstring"""
from __future__ import annotations
from collections.abc import Sequence
from typing import Literal
def _snake_case ( snake_case__ : str , snake_case__ : str ):
A = list(__A )
A = list(__A )
A = 0
for i in range(len(__A ) ):
if lista[i] != lista[i]:
count += 1
A = '''_'''
if count > 1:
return False
else:
return "".join(__A )
def _snake_case ( snake_case__ : list[str] ):
A = []
while True:
A = ['''$'''] * len(__A )
A = []
for i in range(len(__A ) ):
for j in range(i + 1 , len(__A ) ):
A = compare_string(binary[i] , binary[j] )
if k is False:
A = '''*'''
A = '''*'''
temp.append('X' )
for i in range(len(__A ) ):
if checka[i] == "$":
pi.append(binary[i] )
if len(__A ) == 0:
return pi
A = list(set(__A ) )
def _snake_case ( snake_case__ : int , snake_case__ : Sequence[float] ):
A = []
for minterm in minterms:
A = ''''''
for _ in range(__A ):
A = str(minterm % 2 ) + string
minterm //= 2
temp.append(__A )
return temp
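# Illustrative trace of the decimal-to-binary padding above: with 3 variables,
# minterm 5 is built least-significant bit first and prefixed, yielding "101".
_m, _bits, _s = 5, 3, ""
for _ in range(_bits):
    _s = str(_m % 2) + _s
    _m //= 2
assert _s == "101"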
def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : int ):
A = list(__A )
A = list(__A )
A = 0
for i in range(len(__A ) ):
if lista[i] != lista[i]:
count_n += 1
return count_n == count
def _snake_case ( snake_case__ : list[list[int]] , snake_case__ : list[str] ):
A = []
A = [0] * len(__A )
for i in range(len(chart[0] ) ):
A = 0
A = -1
for j in range(len(__A ) ):
if chart[j][i] == 1:
count += 1
A = j
if count == 1:
A = 1
for i in range(len(__A ) ):
if select[i] == 1:
for j in range(len(chart[0] ) ):
if chart[i][j] == 1:
for k in range(len(__A ) ):
A = 0
temp.append(prime_implicants[i] )
while True:
A = 0
A = -1
A = 0
for i in range(len(__A ) ):
A = chart[i].count(1 )
if count_n > max_n:
A = count_n
A = i
if max_n == 0:
return temp
temp.append(prime_implicants[rem] )
for i in range(len(chart[0] ) ):
if chart[rem][i] == 1:
for j in range(len(__A ) ):
A = 0
def _snake_case ( snake_case__ : list[str] , snake_case__ : list[str] ):
A = [[0 for x in range(len(__A ) )] for x in range(len(__A ) )]
for i in range(len(__A ) ):
A = prime_implicants[i].count('_' )
for j in range(len(__A ) ):
if is_for_table(prime_implicants[i] , binary[j] , __A ):
A = 1
return chart
def _snake_case ( ):
A = int(input('Enter the no. of variables\n' ) )
A = [
float(__A )
for x in input(
'Enter the decimal representation of Minterms \'Spaces Separated\'\n' ).split()
]
A = decimal_to_binary(__A , __A )
A = check(__A )
print('Prime Implicants are:' )
print(__A )
A = prime_implicant_chart(__A , __A )
A = selection(__A , __A )
print('Essential Prime Implicants are:' )
print(__A )
if __name__ == "__main__":
import doctest
doctest.testmod()
main() | 708 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 0 |
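# `_LazyModule` above defers the heavy submodule imports until attribute
# access. A minimal standalone sketch of the same idea (not the transformers
# implementation; all names here are hypothetical):
import importlib
import types
class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure  # e.g. {"submodule": ["Symbol"]}
    def __getattr__(self, attr):
        for submodule, names in self._import_structure.items():
            if attr in names:
                module = importlib.import_module(f"{self.__name__}.{submodule}")
                return getattr(module, attr)
        raise AttributeError(attr)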
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : int ):
if isinstance(__lowerCAmelCase , (list, tuple) ) and isinstance(videos[0] , (list, tuple) ) and is_valid_image(videos[0][0] ):
return videos
elif isinstance(__lowerCAmelCase , (list, tuple) ) and is_valid_image(videos[0] ):
return [videos]
elif is_valid_image(__lowerCAmelCase ):
return [[videos]]
raise ValueError(F'Could not make batched video from {videos}' )
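# Standalone sketch of the layout normalization `make_batched` performs above,
# using plain ints as stand-ins for frames (illustrative):
def _make_batched_sketch(videos):
    if isinstance(videos, list) and videos and isinstance(videos[0], list):
        return videos  # already a batch: list of videos (lists of frames)
    if isinstance(videos, list):
        return [videos]  # a single video: a list of frames
    return [[videos]]  # a single frame
assert _make_batched_sketch(7) == [[7]]
assert _make_batched_sketch([7, 8]) == [[7, 8]]
assert _make_batched_sketch([[7], [8]]) == [[7], [8]]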
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = ['''pixel_values''']
def __init__( self : Union[str, Any] ,A_ : str = True ,A_ : int = None ,A_ : Optional[int] = PILImageResampling.BILINEAR ,A_ : Optional[Any] = True ,A_ : Any = None ,A_ : str = True ,A_ : Tuple = 1 / 255 ,A_ : Any = True ,A_ : Optional[Any] = True ,A_ : Union[str, Any] = None ,A_ : Union[str, Any] = None ,**A_ : str ,) -> Optional[Any]:
super().__init__(**_lowerCAmelCase )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(_lowerCAmelCase ,param_name='crop_size' )
A = do_resize
A = size
A = do_center_crop
A = crop_size
A = resample
A = do_rescale
A = rescale_factor
A = offset
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ,A_ : Dict ,A_ : int = PILImageResampling.BILINEAR ,A_ : int = None ,**A_ : int ,) -> Tuple:
A = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
if "shortest_edge" in size:
A = get_resize_output_image_size(_lowerCAmelCase ,size['shortest_edge'] ,default_to_square=_lowerCAmelCase )
elif "height" in size and "width" in size:
A = (size['height'], size['width'])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : Optional[Any] ,A_ : Dict = None ,**A_ : List[str] ,) -> str:
A = get_size_dict(_lowerCAmelCase )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(_lowerCAmelCase ,size=(size['height'], size['width']) ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : Union[str, Any] ,A_ : Optional[Any] = True ,A_ : str = None ,**A_ : Tuple ,) -> str:
A = image.astype(np.floataa )
if offset:
A = image - (scale / 2)
return rescale(_lowerCAmelCase ,scale=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Tuple ,A_ : Dict ,A_ : Tuple ,A_ : str = None ,**A_ : Tuple ,) -> Dict:
return normalize(_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase ,data_format=_lowerCAmelCase ,**_lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any ,A_ : Tuple = None ,A_ : Union[str, Any] = None ,A_ : str = None ,A_ : List[str] = None ,A_ : str = None ,A_ : Union[str, Any] = None ,A_ : str = None ,A_ : List[Any] = None ,A_ : Optional[int] = None ,A_ : Union[str, Any] = None ,A_ : List[Any] = None ,A_ : Tuple = ChannelDimension.FIRST ,) -> str:
if do_resize and size is None or resample is None:
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
A = to_numpy_array(_lowerCAmelCase )
if do_resize:
A = self.resize(image=_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase )
if do_center_crop:
A = self.center_crop(_lowerCAmelCase ,size=_lowerCAmelCase )
if do_rescale:
A = self.rescale(image=_lowerCAmelCase ,scale=_lowerCAmelCase ,offset=_lowerCAmelCase )
if do_normalize:
A = self.normalize(image=_lowerCAmelCase ,mean=_lowerCAmelCase ,std=_lowerCAmelCase )
A = to_channel_dimension_format(_lowerCAmelCase ,_lowerCAmelCase )
return image
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Union[str, Any] ,A_ : List[str] = None ,A_ : Optional[Any] = None ,A_ : List[str] = None ,A_ : Dict = None ,A_ : Union[str, Any] = None ,A_ : Optional[Any] = None ,A_ : Optional[Any] = None ,A_ : Dict = None ,A_ : List[str] = None ,A_ : Dict = None ,A_ : Union[str, Any] = None ,A_ : Dict = None ,A_ : str = ChannelDimension.FIRST ,**A_ : Any ,) -> Dict:
A = do_resize if do_resize is not None else self.do_resize
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = offset if offset is not None else self.offset
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = size if size is not None else self.size
A = get_size_dict(_lowerCAmelCase ,default_to_square=_lowerCAmelCase )
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(_lowerCAmelCase ,param_name='crop_size' )
if not valid_images(_lowerCAmelCase ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
A = make_batched(_lowerCAmelCase )
A = [
[
self._preprocess_image(
image=_lowerCAmelCase ,do_resize=_lowerCAmelCase ,size=_lowerCAmelCase ,resample=_lowerCAmelCase ,do_center_crop=_lowerCAmelCase ,crop_size=_lowerCAmelCase ,do_rescale=_lowerCAmelCase ,rescale_factor=_lowerCAmelCase ,offset=_lowerCAmelCase ,do_normalize=_lowerCAmelCase ,image_mean=_lowerCAmelCase ,image_std=_lowerCAmelCase ,data_format=_lowerCAmelCase ,)
for img in video
]
for video in videos
]
A = {'pixel_values': videos}
return BatchFeature(data=_lowerCAmelCase ,tensor_type=_lowerCAmelCase ) | 709 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
A = swinva_name.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def _snake_case ( snake_case__ : List[Any] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
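# Illustrative trace of the intended renaming chain above on a hypothetical
# timm key (the obfuscated assignments do not update `name` in place):
#   "layers.0.blocks.1.attn.proj.weight"
#   -> "encoder.layers.0.blocks.1.attention.output.dense.weight"   # prefix + attn.proj
#   -> "swinv2.encoder.layers.0.blocks.1.attention.output.dense.weight"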
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[
dim : dim * 2
]
A = val[-dim:]
else:
A = val
return orig_state_dict
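# Standalone sketch of the fused-QKV weight split above: rows are stacked as
# [q; k; v], each block `dim` rows tall (dim = 2 here, hypothetical values):
_qkv = torch.arange(24, dtype=torch.float32).reshape(6, 4)
_dim = 2
_q, _k, _v = _qkv[:_dim, :], _qkv[_dim : _dim * 2, :], _qkv[-_dim:, :]
assert _q.shape == _k.shape == _v.shape == (2, 4)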
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
A = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A = get_swinva_config(snake_case__ )
A = SwinvaForImageClassification(snake_case__ )
model.eval()
A = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A = image_processor(images=snake_case__ , return_tensors='pt' )
A = timm_model(inputs['pixel_values'] )
A = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path) | 22 | 0 |
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : str ,A_ : Optional[int]=13 ,A_ : Optional[int]=7 ,A_ : List[str]=True ,A_ : Dict=True ,A_ : Dict=True ,A_ : Any=True ,A_ : Union[str, Any]=True ,A_ : Tuple=False ,A_ : List[str]=False ,A_ : List[str]=False ,A_ : int=2 ,A_ : Dict=99 ,A_ : Any=0 ,A_ : Tuple=32 ,A_ : int=5 ,A_ : Optional[int]=4 ,A_ : Tuple=0.1 ,A_ : Optional[Any]=0.1 ,A_ : Any=512 ,A_ : Union[str, Any]=12 ,A_ : str=2 ,A_ : str=0.02 ,A_ : str=3 ,A_ : str=4 ,A_ : Optional[int]="last" ,A_ : Dict=None ,A_ : Optional[int]=None ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
return FlaubertConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : Tuple ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Optional[int] ,) -> List[str]:
A = FlaubertModel(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A = model(UpperCamelCase_ ,lengths=UpperCamelCase_ ,langs=UpperCamelCase_ )
A = model(UpperCamelCase_ ,langs=UpperCamelCase_ )
A = model(UpperCamelCase_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : str ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Tuple ,A_ : List[Any] ,A_ : Any ,A_ : str ,) -> Optional[int]:
A = FlaubertWithLMHeadModel(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A = model(UpperCamelCase_ ,token_type_ids=UpperCamelCase_ ,labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Tuple ,A_ : int ,A_ : int ,) -> List[str]:
A = FlaubertForQuestionAnsweringSimple(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A = model(UpperCamelCase_ )
A = model(UpperCamelCase_ ,start_positions=UpperCamelCase_ ,end_positions=UpperCamelCase_ )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : Dict ,A_ : Tuple ,A_ : Dict ,A_ : Dict ,A_ : Tuple ,A_ : Union[str, Any] ,) -> Tuple:
A = FlaubertForQuestionAnswering(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A = model(UpperCamelCase_ )
A = model(
UpperCamelCase_ ,start_positions=UpperCamelCase_ ,end_positions=UpperCamelCase_ ,cls_index=UpperCamelCase_ ,is_impossible=UpperCamelCase_ ,p_mask=UpperCamelCase_ ,)
A = model(
UpperCamelCase_ ,start_positions=UpperCamelCase_ ,end_positions=UpperCamelCase_ ,cls_index=UpperCamelCase_ ,is_impossible=UpperCamelCase_ ,)
        (A ,) = result_with_labels.to_tuple()
A = model(UpperCamelCase_ ,start_positions=UpperCamelCase_ ,end_positions=UpperCamelCase_ )
        (A ,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Dict ,A_ : str ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Optional[int] ,A_ : Union[str, Any] ,) -> Any:
A = FlaubertForSequenceClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A = model(UpperCamelCase_ )
A = model(UpperCamelCase_ ,labels=UpperCamelCase_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : str ,A_ : Any ,A_ : Optional[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : List[str] ,A_ : Dict ,) -> List[str]:
A = self.num_labels
A = FlaubertForTokenClassification(UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A = model(UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,labels=UpperCamelCase_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,) -> int:
A = self.num_choices
A = FlaubertForMultipleChoice(config=UpperCamelCase_ )
model.to(UpperCamelCase_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
UpperCamelCase_ ,attention_mask=UpperCamelCase_ ,token_type_ids=UpperCamelCase_ ,labels=UpperCamelCase_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {
"input_ids": input_ids,
"token_type_ids": token_type_ids,
"lengths": input_lengths,
"attention_mask": input_mask,
}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Optional[Any] = (
{
"feature-extraction": FlaubertModel,
"fill-mask": FlaubertWithLMHeadModel,
"question-answering": FlaubertForQuestionAnsweringSimple,
"text-classification": FlaubertForSequenceClassification,
"token-classification": FlaubertForTokenClassification,
"zero-shot": FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : str ,A_ : Optional[int] ,A_ : Any ,A_ : Dict ) -> List[Any]:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : int ,A_ : int ,A_ : Union[str, Any]=False ) -> Dict:
A = super()._prepare_for_class(UpperCamelCase_ ,UpperCamelCase_ ,return_labels=UpperCamelCase_ )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=UpperCamelCase_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=UpperCamelCase_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
A = FlaubertModelTester(self )
A = ConfigTester(self ,config_class=UpperCamelCase_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*UpperCamelCase_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*UpperCamelCase_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = FlaubertModel.from_pretrained(UpperCamelCase_ )
self.assertIsNotNone(UpperCamelCase_ )
@slow
@require_torch_gpu
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
A = True
A = model_class(config=UpperCamelCase_ )
A = self._prepare_for_class(UpperCamelCase_ ,UpperCamelCase_ )
A = torch.jit.trace(
UpperCamelCase_ ,(inputs_dict['input_ids'].to('cpu' ), inputs_dict['attention_mask'].to('cpu' )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(UpperCamelCase_ ,os.path.join(UpperCamelCase_ ,'traced_model.pt' ) )
A = torch.jit.load(os.path.join(UpperCamelCase_ ,'traced_model.pt' ) ,map_location=UpperCamelCase_ )
loaded(inputs_dict['input_ids'].to(UpperCamelCase_ ) ,inputs_dict['attention_mask'].to(UpperCamelCase_ ) )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = FlaubertModel.from_pretrained('flaubert/flaubert_base_cased' )
A = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
with torch.no_grad():
A = model(UpperCamelCase_ )[0]
A = torch.Size((1, 11, 768) )
self.assertEqual(output.shape ,UpperCamelCase_ )
A = torch.tensor(
[[[-2.62_51, -1.42_98, -0.02_27], [-2.85_10, -1.63_87, 0.22_58], [-2.81_14, -1.18_32, -0.30_66]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,UpperCamelCase_ ,atol=1e-4 ) ) | 710 |
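# The integration test above pins a 3x3 logits slice against hard-coded
# reference values. The core check, on synthetic tensors (illustrative):
#
#     out = torch.ones(1, 11, 768) * 0.5
#     ref = torch.full((1, 3, 3), 0.5)
#     assert torch.allclose(out[:, :3, :3], ref, atol=1e-4)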
"""simple docstring"""
from math import pi, sqrt
def _snake_case ( snake_case__ : float ):
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
elif num - int(snake_case__ ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
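# The recursion above implements the recurrence Γ(n) = (n - 1) · Γ(n - 1) with
# base cases Γ(1) = 1 and Γ(1/2) = √π, e.g. Γ(4) = 3·2·1 = 6 and
# Γ(5/2) = (3/2)·(1/2)·√π.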
def _snake_case ( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 | 0 |
"""simple docstring"""
import argparse
from pathlib import Path
import fairseq
import torch
from fairseq.models.xmod import XMODModel as FairseqXmodModel
from packaging import version
from transformers import XmodConfig, XmodForMaskedLM, XmodForSequenceClassification
from transformers.utils import logging
if version.parse(fairseq.__version__) < version.parse('''0.12.2'''):
raise Exception('''requires fairseq >= 0.12.2''')
if version.parse(fairseq.__version__) > version.parse('''2'''):
raise Exception('''requires fairseq < v2''')
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = 'Hello, World!'
_lowercase = 'en_XX'
def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : bool ):
A = Path('data_bin' )
A = FairseqXmodModel.from_pretrained(
model_name_or_path=str(Path(_lowerCAmelCase ).parent ) , checkpoint_file=Path(_lowerCAmelCase ).name , _name='xmod_base' , arch='xmod_base' , task='multilingual_masked_lm' , data_name_or_path=str(_lowerCAmelCase ) , bpe='sentencepiece' , sentencepiece_model=str(Path(_lowerCAmelCase ).parent / 'sentencepiece.bpe.model' ) , src_dict=str(data_dir / 'dict.txt' ) , )
xmod.eval() # disable dropout
print(_lowerCAmelCase )
A = xmod.model.encoder.sentence_encoder
A = XmodConfig(
vocab_size=xmod_sent_encoder.embed_tokens.num_embeddings , hidden_size=xmod.cfg.model.encoder_embed_dim , num_hidden_layers=xmod.cfg.model.encoder_layers , num_attention_heads=xmod.cfg.model.encoder_attention_heads , intermediate_size=xmod.cfg.model.encoder_ffn_embed_dim , max_position_embeddings=514 , type_vocab_size=1 , layer_norm_eps=1e-5 , pre_norm=xmod.cfg.model.encoder_normalize_before , adapter_reduction_factor=getattr(xmod.cfg.model , 'bottleneck' , 2 ) , adapter_layer_norm=xmod.cfg.model.adapter_layer_norm , adapter_reuse_layer_norm=xmod.cfg.model.adapter_reuse_layer_norm , ln_before_adapter=xmod.cfg.model.ln_before_adapter , languages=xmod.cfg.model.languages , )
if classification_head:
A = xmod.model.classification_heads["mnli"].out_proj.weight.shape[0]
print('Our X-MOD config:' , _lowerCAmelCase )
A = XmodForSequenceClassification(_lowerCAmelCase ) if classification_head else XmodForMaskedLM(_lowerCAmelCase )
model.eval()
# Now let's copy all the weights.
# Embeddings
A = xmod_sent_encoder.embed_tokens.weight
A = xmod_sent_encoder.embed_positions.weight
A = torch.zeros_like(
model.roberta.embeddings.token_type_embeddings.weight ) # just zero them out b/c xmod doesn't use them.
A = xmod_sent_encoder.layernorm_embedding.weight
A = xmod_sent_encoder.layernorm_embedding.bias
for i in range(config.num_hidden_layers ):
# Encoder: start of layer
A = model.roberta.encoder.layer[i]
A = xmod_sent_encoder.layers[i]
# self attention
A = layer.attention.self
if not (
xmod_layer.self_attn.k_proj.weight.data.shape
== xmod_layer.self_attn.q_proj.weight.data.shape
== xmod_layer.self_attn.v_proj.weight.data.shape
== torch.Size((config.hidden_size, config.hidden_size) )
):
raise AssertionError('Dimensions of self-attention weights do not match.' )
A = xmod_layer.self_attn.q_proj.weight
A = xmod_layer.self_attn.q_proj.bias
A = xmod_layer.self_attn.k_proj.weight
A = xmod_layer.self_attn.k_proj.bias
A = xmod_layer.self_attn.v_proj.weight
A = xmod_layer.self_attn.v_proj.bias
# self-attention output
A = layer.attention.output
if self_output.dense.weight.shape != xmod_layer.self_attn.out_proj.weight.shape:
raise AssertionError('Dimensions of self-attention output weights do not match.' )
A = xmod_layer.self_attn.out_proj.weight
A = xmod_layer.self_attn.out_proj.bias
A = xmod_layer.self_attn_layer_norm.weight
A = xmod_layer.self_attn_layer_norm.bias
# intermediate
A = layer.intermediate
if intermediate.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of intermediate weights do not match.' )
A = xmod_layer.fca.weight
A = xmod_layer.fca.bias
# output
A = layer.output
if bert_output.dense.weight.shape != xmod_layer.fca.weight.shape:
raise AssertionError('Dimensions of feed-forward weights do not match.' )
A = xmod_layer.fca.weight
A = xmod_layer.fca.bias
A = xmod_layer.final_layer_norm.weight
A = xmod_layer.final_layer_norm.bias
if bert_output.adapter_layer_norm is not None:
A = xmod_layer.adapter_layer_norm.weight
A = xmod_layer.adapter_layer_norm.bias
if sorted(bert_output.adapter_modules.keys() ) != sorted(xmod_layer.adapter_modules.keys() ):
raise AssertionError('Lists of language adapters do not match.' )
for lang_code, adapter in xmod_layer.adapter_modules.items():
A = bert_output.adapter_modules[lang_code]
A = xmod_layer.adapter_modules[lang_code]
A = from_adapter.fca.weight
A = from_adapter.fca.bias
A = from_adapter.fca.weight
A = from_adapter.fca.bias
# end of layer
if xmod_sent_encoder.layer_norm is not None:
A = xmod_sent_encoder.layer_norm.weight
A = xmod_sent_encoder.layer_norm.bias
if classification_head:
A = xmod.model.classification_heads["mnli"].dense.weight
A = xmod.model.classification_heads["mnli"].dense.bias
A = xmod.model.classification_heads["mnli"].out_proj.weight
A = xmod.model.classification_heads["mnli"].out_proj.bias
else:
# LM Head
A = xmod.model.encoder.lm_head.dense.weight
A = xmod.model.encoder.lm_head.dense.bias
A = xmod.model.encoder.lm_head.layer_norm.weight
A = xmod.model.encoder.lm_head.layer_norm.bias
A = xmod.model.encoder.lm_head.weight
A = xmod.model.encoder.lm_head.bias
# Let's check that we get the same results.
A = xmod.encode(_lowerCAmelCase ).unsqueeze(0 ) # batch of size 1
model.roberta.set_default_language(_lowerCAmelCase )
A = model(_lowerCAmelCase )[0]
if classification_head:
A = xmod.model.classification_heads["mnli"](xmod.extract_features(_lowerCAmelCase ) )
else:
A = xmod.model(_lowerCAmelCase , lang_id=[SAMPLE_LANGUAGE] )[0]
print(our_output.shape , their_output.shape )
A = torch.max(torch.abs(our_output - their_output ) ).item()
print(F'max_absolute_diff = {max_absolute_diff}' ) # ~ 1e-7
A = torch.allclose(_lowerCAmelCase , _lowerCAmelCase , atol=1e-3 )
print('Do both models output the same tensors?' , '🔥' if success else '💩' )
if not success:
raise Exception('Something went wRoNg' )
Path(_lowerCAmelCase ).mkdir(parents=_lowerCAmelCase , exist_ok=_lowerCAmelCase )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(_lowerCAmelCase )
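# Standalone sketch of the numerical-equivalence gate above (hypothetical
# tensors): the converted model must match the reference within atol=1e-3.
_a = torch.zeros(1, 8)
_b = _a + 1e-5
assert torch.max(torch.abs(_a - _b)).item() < 1e-3
assert torch.allclose(_a, _b, atol=1e-3)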
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--xmod_checkpoint_path''', default=None, type=str, required=True, help='''Path the official PyTorch dump.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
parser.add_argument(
'''--classification_head''', action='''store_true''', help='''Whether to convert a final classification head.'''
)
_lowercase = parser.parse_args()
convert_xmod_checkpoint_to_pytorch(
args.xmod_checkpoint_path, args.pytorch_dump_folder_path, args.classification_head
) | 711 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class VectorQuantizer(nn.Module ):
    '''simple docstring'''

    def __init__( self ,n_e ,vq_embed_dim ,beta ,remap=None ,unknown_index='random' ,sane_index_shape=False ,legacy=True ) -> None:
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e ,self.vq_embed_dim )
        self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )

        self.remap = remap
        if self.remap is not None:
            self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                F'Remapping {self.n_e} indices to {self.re_embed} indices. '
                F'Using {self.unknown_index} for unknown indices.' )
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used( self ,inds ):
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] ,-1 )
        used = self.used.to(inds )
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1 )
        unknown = match.sum(2 ) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape )

    def unmap_to_all( self ,inds ):
        ishape = inds.shape
        assert len(ishape ) > 1
        inds = inds.reshape(ishape[0] ,-1 )
        used = self.used.to(inds )
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,inds )
        return back.reshape(ishape )

    def forward( self ,z ):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0 ,2 ,3 ,1 ).contiguous()
        z_flattened = z.view(-1 ,self.vq_embed_dim )

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened ,self.embedding.weight ) ,dim=1 )

        z_q = self.embedding(min_encoding_indices ).view(z.shape )
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
        else:
            loss = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0 ,3 ,1 ,2 ).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0] ,-1 )  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices )
            min_encoding_indices = min_encoding_indices.reshape(-1 ,1 )  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry( self ,indices ,shape ):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0] ,-1 )  # add batch axis
            indices = self.unmap_to_all(indices )
            indices = indices.reshape(-1 )  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices )

        if shape is not None:
            z_q = z_q.view(shape )
            # reshape back to match original input shape
            z_q = z_q.permute(0 ,3 ,1 ,2 ).contiguous()

        return z_q
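

# --- Hedged illustration (assumption: standalone sketch, not original code).
# The quantizer above snaps each latent onto its nearest codebook row and uses
# the straight-through estimator so encoder gradients bypass the
# non-differentiable argmin. Distilled to its essentials:
def _nearest_codebook_sketch(z_flat, embedding):
    """z_flat: (N, D) latents; embedding: nn.Embedding with weight (K, D)."""
    indices = torch.argmin(torch.cdist(z_flat, embedding.weight), dim=1)
    z_q = embedding(indices)
    # Forward pass returns z_q; backward treats quantization as the identity.
    return z_flat + (z_q - z_flat).detach(), indices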
class DiagonalGaussianDistribution(object ):
    '''simple docstring'''

    def __init__( self ,parameters ,deterministic=False ) -> None:
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters ,2 ,dim=1 )
        self.logvar = torch.clamp(self.logvar ,-30.0 ,20.0 )
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar )
        self.var = torch.exp(self.logvar )
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )

    def sample( self ,generator=None ) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape ,generator=generator ,device=self.parameters.device ,dtype=self.parameters.dtype )
        x = self.mean + self.std * sample
        return x

    def kl( self ,other=None ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean ,2 ) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar ,dim=[1, 2, 3] ,)

    def nll( self ,sample ,dims=[1, 2, 3] ):
        if self.deterministic:
            return torch.Tensor([0.0] )
        logtwopi = np.log(2.0 * np.pi )
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=dims )

    def mode( self ):
        return self.mean
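

# --- Hedged illustration (assumption: standalone sketch). sample() above is the
# reparameterization trick x = mean + std * eps with eps ~ N(0, I), and kl()
# with other=None is the closed form
#   KL(N(mu, sigma^2) || N(0, I)) = 0.5 * sum(mu^2 + sigma^2 - 1 - log sigma^2)
# summed over the non-batch dimensions:
def _kl_to_standard_normal_sketch(mean, logvar):
    return 0.5 * torch.sum(torch.pow(mean, 2) + torch.exp(logvar) - 1.0 - logvar, dim=[1, 2, 3])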
"""simple docstring"""
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_distilbert import DistilBertTokenizer
_lowercase = logging.get_logger(__name__)
_lowercase = {"vocab_file": "vocab.txt", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
"vocab_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/vocab.txt",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/vocab.txt",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/vocab.txt"
),
"distilbert-base-german-cased": "https://huggingface.co/distilbert-base-german-cased/resolve/main/vocab.txt",
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/vocab.txt"
),
},
"tokenizer_file": {
"distilbert-base-uncased": "https://huggingface.co/distilbert-base-uncased/resolve/main/tokenizer.json",
"distilbert-base-uncased-distilled-squad": (
"https://huggingface.co/distilbert-base-uncased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-cased": "https://huggingface.co/distilbert-base-cased/resolve/main/tokenizer.json",
"distilbert-base-cased-distilled-squad": (
"https://huggingface.co/distilbert-base-cased-distilled-squad/resolve/main/tokenizer.json"
),
"distilbert-base-german-cased": (
"https://huggingface.co/distilbert-base-german-cased/resolve/main/tokenizer.json"
),
"distilbert-base-multilingual-cased": (
"https://huggingface.co/distilbert-base-multilingual-cased/resolve/main/tokenizer.json"
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
"distilbert-base-uncased": 5_12,
"distilbert-base-uncased-distilled-squad": 5_12,
"distilbert-base-cased": 5_12,
"distilbert-base-cased-distilled-squad": 5_12,
"distilbert-base-german-cased": 5_12,
"distilbert-base-multilingual-cased": 5_12,
}
PRETRAINED_INIT_CONFIGURATION = {
"distilbert-base-uncased": {"do_lower_case": True},
"distilbert-base-uncased-distilled-squad": {"do_lower_case": True},
"distilbert-base-cased": {"do_lower_case": False},
"distilbert-base-cased-distilled-squad": {"do_lower_case": False},
"distilbert-base-german-cased": {"do_lower_case": False},
"distilbert-base-multilingual-cased": {"do_lower_case": False},
}
class DistilBertTokenizerFast( PreTrainedTokenizerFast ):
'''simple docstring'''
_lowerCamelCase: str = VOCAB_FILES_NAMES
_lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: List[Any] = PRETRAINED_INIT_CONFIGURATION
_lowerCamelCase: List[str] = ["""input_ids""", """attention_mask"""]
_lowerCamelCase: List[Any] = DistilBertTokenizer
    def __init__( self ,vocab_file=None ,tokenizer_file=None ,do_lower_case=True ,unk_token='[UNK]' ,sep_token='[SEP]' ,pad_token='[PAD]' ,cls_token='[CLS]' ,mask_token='[MASK]' ,tokenize_chinese_chars=True ,strip_accents=None ,**kwargs ) -> None:
        super().__init__(
            vocab_file ,tokenizer_file=tokenizer_file ,do_lower_case=do_lower_case ,unk_token=unk_token ,sep_token=sep_token ,pad_token=pad_token ,cls_token=cls_token ,mask_token=mask_token ,tokenize_chinese_chars=tokenize_chinese_chars ,strip_accents=strip_accents ,**kwargs ,)

        normalizer_state = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
        if (
            normalizer_state.get('lowercase' ,do_lower_case ) != do_lower_case
            or normalizer_state.get('strip_accents' ,strip_accents ) != strip_accents
            or normalizer_state.get('handle_chinese_chars' ,tokenize_chinese_chars ) != tokenize_chinese_chars
        ):
            normalizer_class = getattr(normalizers ,normalizer_state.pop('type' ) )
            normalizer_state['lowercase'] = do_lower_case
            normalizer_state['strip_accents'] = strip_accents
            normalizer_state['handle_chinese_chars'] = tokenize_chinese_chars
            self.backend_tokenizer.normalizer = normalizer_class(**normalizer_state )

        self.do_lower_case = do_lower_case

    def build_inputs_with_special_tokens( self ,token_ids_a ,token_ids_b=None ):
        output = [self.cls_token_id] + token_ids_a + [self.sep_token_id]
        if token_ids_b:
            output += token_ids_b + [self.sep_token_id]
        return output

    def create_token_type_ids_from_sequences( self ,token_ids_a ,token_ids_b=None ) -> List[int]:
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_b is None:
            return len(cls + token_ids_a + sep ) * [0]
        return len(cls + token_ids_a + sep ) * [0] + len(token_ids_b + sep ) * [1]

    def save_vocabulary( self ,save_directory ,filename_prefix=None ) -> Tuple[str]:
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
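
    # Hedged usage sketch (illustrative token ids, not real vocabulary entries):
    # for a pair of sequences the two helpers above produce the BERT layout
    #   input_ids      = [CLS] A [SEP] B [SEP]
    # e.g. token_ids_a=[7, 8] and token_ids_b=[9] give ids [cls, 7, 8, sep, 9, sep]
    # and token type ids [0, 0, 0, 0, 1, 1] (zeros over the first segment and its
    # special tokens, ones over the second segment and its trailing [SEP]).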
"""simple docstring"""
def neville_interpolate(x_points: list ,y_points: list ,xa: int ) -> list:
    """
    Interpolate and evaluate the polynomial through the given points at ``xa``
    using Neville's iterated-interpolation table. Note the scheme below only
    uses the points with index >= 1, mirroring the classic table layout.

    >>> neville_interpolate([0, 1, 2, 3], [0, 1, 4, 9], 5)[0]
    25.0
    """
    n = len(x_points )
    q = [[0] * n for _ in range(n )]
    for i in range(n ):
        q[i][1] = y_points[i]

    for i in range(2 ,n ):
        for j in range(i ,n ):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])

    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 0 |
from . import (
albert,
align,
altclip,
audio_spectrogram_transformer,
auto,
autoformer,
bark,
bart,
barthez,
bartpho,
beit,
bert,
bert_generation,
bert_japanese,
bertweet,
big_bird,
bigbird_pegasus,
biogpt,
bit,
blenderbot,
blenderbot_small,
blip,
blip_a,
bloom,
bridgetower,
byta,
camembert,
canine,
chinese_clip,
clap,
clip,
clipseg,
codegen,
conditional_detr,
convbert,
convnext,
convnextva,
cpm,
cpmant,
ctrl,
cvt,
dataavec,
deberta,
deberta_va,
decision_transformer,
deformable_detr,
deit,
deprecated,
deta,
detr,
dialogpt,
dinat,
distilbert,
dit,
donut,
dpr,
dpt,
efficientformer,
efficientnet,
electra,
encodec,
encoder_decoder,
ernie,
ernie_m,
esm,
falcon,
flaubert,
flava,
fnet,
focalnet,
fsmt,
funnel,
git,
glpn,
gpta,
gpt_bigcode,
gpt_neo,
gpt_neox,
gpt_neox_japanese,
gpt_swa,
gptj,
gptsan_japanese,
graphormer,
groupvit,
herbert,
hubert,
ibert,
imagegpt,
informer,
instructblip,
jukebox,
layoutlm,
layoutlmva,
layoutlmva,
layoutxlm,
led,
levit,
lilt,
llama,
longformer,
longta,
luke,
lxmert,
mam_aaa,
marian,
markuplm,
maskaformer,
maskformer,
mbart,
mbartaa,
mega,
megatron_bert,
megatron_gpta,
mgp_str,
mluke,
mobilebert,
mobilenet_va,
mobilenet_va,
mobilevit,
mobilevitva,
mpnet,
mra,
mta,
musicgen,
mvp,
nat,
nezha,
nllb,
nllb_moe,
nystromformer,
oneformer,
open_llama,
openai,
opt,
owlvit,
pegasus,
pegasus_x,
perceiver,
phobert,
pixastruct,
plbart,
poolformer,
prophetnet,
qdqbert,
rag,
realm,
reformer,
regnet,
rembert,
resnet,
roberta,
roberta_prelayernorm,
roc_bert,
roformer,
rwkv,
sam,
segformer,
sew,
sew_d,
speech_encoder_decoder,
speech_to_text,
speech_to_text_a,
speechta,
splinter,
squeezebert,
swiftformer,
swin,
swinasr,
swinva,
switch_transformers,
ta,
table_transformer,
tapas,
time_series_transformer,
timesformer,
timm_backbone,
transfo_xl,
trocr,
tvlt,
umta,
unispeech,
unispeech_sat,
upernet,
videomae,
vilt,
vision_encoder_decoder,
vision_text_dual_encoder,
visual_bert,
vit,
vit_hybrid,
vit_mae,
vit_msn,
vivit,
wavaveca,
wavaveca_conformer,
wavaveca_phoneme,
wavaveca_with_lm,
wavlm,
whisper,
x_clip,
xglm,
xlm,
xlm_prophetnet,
xlm_roberta,
xlm_roberta_xl,
xlnet,
xmod,
yolos,
yoso,
) | 713 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class MaskaFormerModelTester:
    '''simple docstring'''

    def __init__( self ,parent ,batch_size=2 ,is_training=True ,use_auxiliary_loss=False ,num_queries=10 ,num_channels=3 ,min_size=32 * 8 ,max_size=32 * 8 ,num_labels=4 ,hidden_dim=64 ,) -> None:
        self.parent = parent
        self.batch_size = batch_size
        self.is_training = is_training
        self.use_auxiliary_loss = use_auxiliary_loss
        self.num_queries = num_queries
        self.num_channels = num_channels
        self.min_size = min_size
        self.max_size = max_size
        self.num_labels = num_labels
        self.hidden_dim = hidden_dim
        self.mask_feature_size = hidden_dim
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
            torch_device )

        pixel_mask = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=torch_device )

        mask_labels = (
            torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=torch_device ) > 0.5
        ).float()
        class_labels = (torch.rand((self.batch_size, self.num_labels) ,device=torch_device ) > 0.5).long()

        config = self.get_config()
        return config, pixel_values, pixel_mask, mask_labels, class_labels

    def get_config( self ):
        config = MaskaFormerConfig(
            hidden_size=self.hidden_dim ,)
        config.num_queries = self.num_queries
        config.num_labels = self.num_labels
        config.backbone_config.depths = [1, 1, 1, 1]
        config.backbone_config.num_channels = self.num_channels
        config.encoder_feedforward_dim = 64
        config.dim_feedforward = 128
        config.hidden_dim = self.hidden_dim
        config.mask_feature_size = self.hidden_dim
        config.feature_size = self.hidden_dim
        return config

    def prepare_config_and_inputs_for_common( self ):
        config, pixel_values, pixel_mask, mask_labels, class_labels = self.prepare_config_and_inputs()
        inputs_dict = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
        return config, inputs_dict
    def check_output_hidden_state( self ,output ,config ):
        encoder_hidden_states = output.encoder_hidden_states
        pixel_decoder_hidden_states = output.pixel_decoder_hidden_states
        transformer_decoder_hidden_states = output.transformer_decoder_hidden_states

        # assertTrue(x ,y) treats y as a message and always passes for non-empty
        # lists; assertEqual performs the intended length comparison.
        self.parent.assertEqual(len(encoder_hidden_states ) ,len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(pixel_decoder_hidden_states ) ,len(config.backbone_config.depths ) )
        self.parent.assertEqual(len(transformer_decoder_hidden_states ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class MaskaFormerModelTest( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
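

# Hedged note (not part of the original tests): the gradient-retention test
# above relies on the fact that autograd only populates .grad on leaf tensors,
# so intermediate activations need an explicit opt-in, e.g.:
#   hidden = block(inputs)            # non-leaf tensor
#   hidden.retain_grad()              # keep its gradient after backward
#   loss.backward(retain_graph=True)  # graph preserved for further inspection
#   assert hidden.grad is not None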
_lowercase = 1e-4
def prepare_img():
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
            [-8.7839, -9.0056, -8.8121],
            [-7.4104, -7.0313, -6.5401],
            [-6.6105, -6.3427, -6.4675],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
                [1.8324, -8.0835, -4.1922],
                [0.8450, -9.0050, -3.6053],
                [0.3045, -7.7293, -3.0275],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 0 |
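

# --- Hedged post-processing sketch (assumption: the helper name is ours; the
# fusion rule follows the standard universal-segmentation recipe). The head
# tested above returns class logits (batch, queries, labels + 1) and mask
# logits (batch, queries, h, w); a semantic map is obtained by dropping the
# trailing "no object" class and mixing masks with class probabilities:
def _semantic_map_sketch(class_queries_logits, masks_queries_logits):
    class_probs = class_queries_logits.softmax(dim=-1 )[..., :-1]  # (b, q, c)
    mask_probs = masks_queries_logits.sigmoid()  # (b, q, h, w)
    segmentation = torch.einsum('bqc,bqhw->bchw' ,class_probs ,mask_probs )
    return segmentation.argmax(dim=1 )  # (b, h, w) label map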
def solution(n: int = 1000 ) -> int:
    """Count how many of the first ``n`` continued-fraction expansions of
    sqrt(2) have a numerator with more digits than the denominator."""
    prev_numerator, prev_denominator = 1, 1
    result = []
    for i in range(1 ,n + 1 ):
        numerator = prev_numerator + 2 * prev_denominator
        denominator = prev_numerator + prev_denominator
        if len(str(numerator ) ) > len(str(denominator ) ):
            result.append(i )
        prev_numerator = numerator
        prev_denominator = denominator
    return len(result )
if __name__ == "__main__":
print(F"""{solution() = }""") | 714 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class TFEsmModelTester:
    '''simple docstring'''

    def __init__( self ,parent ) -> None:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 32
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.scope = None

    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
            choice_labels = ids_tensor([self.batch_size] ,self.num_choices )

        config = EsmConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)

        return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()

        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )

        return (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
            encoder_hidden_states,
            encoder_attention_mask,
        )
    def create_and_check_model( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = TFEsmModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs )

        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_model_as_decoder( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ,encoder_hidden_states ,encoder_attention_mask ,):
        config.add_cross_attention = True
        model = TFEsmModel(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'encoder_hidden_states': encoder_hidden_states,
            'encoder_attention_mask': encoder_attention_mask,
        }
        result = model(inputs )

        inputs = [input_ids, input_mask]
        result = model(inputs ,encoder_hidden_states=encoder_hidden_states )

        # Also check the case where encoder outputs are not passed
        result = model(input_ids ,attention_mask=input_mask )

        self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )

    def create_and_check_for_masked_lm( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        model = TFEsmForMaskedLM(config=config )
        result = model([input_ids, input_mask] )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )

    def create_and_check_for_token_classification( self ,config ,input_ids ,input_mask ,sequence_labels ,token_labels ,choice_labels ):
        config.num_labels = self.num_labels
        model = TFEsmForTokenClassification(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask}
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFEsmModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
                    [8.921518, -10.589814, -6.4671307],
                    [-6.3967156, -13.911377, -1.1211915],
                    [-7.781247, -13.951557, -3.740592],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 | 0 |
"""simple docstring"""
import unittest
from pathlib import Path
from tempfile import TemporaryDirectory
from transformers import AutoConfig, TFGPTaLMHeadModel, is_keras_nlp_available, is_tf_available
from transformers.models.gpta.tokenization_gpta import GPTaTokenizer
from transformers.testing_utils import require_keras_nlp, require_tf, slow
if is_tf_available():
import tensorflow as tf
if is_keras_nlp_available():
from transformers.models.gpta import TFGPTaTokenizer
_lowercase = ["gpt2"]
_lowercase = "gpt2"
if is_tf_available():
    class ModelToSave(tf.Module ):
        '''simple docstring'''

        def __init__( self ,tokenizer ) -> None:
            super().__init__()
            self.tokenizer = tokenizer
            config = AutoConfig.from_pretrained(TINY_MODEL_CHECKPOINT )
            self.model = TFGPTaLMHeadModel.from_config(config )

        @tf.function(input_signature=(tf.TensorSpec((None,) ,tf.string ,name='text' ),) )
        def serving( self ,text ):
            tokenized = self.tokenizer(text )
            input_ids_dense = tokenized['input_ids'].to_tensor()
            input_mask = tf.cast(input_ids_dense > 0 ,tf.int32 )
            # input_mask = tf.reshape(input_mask, [-1, MAX_SEQ_LEN])
            outputs = self.model(input_ids=input_ids_dense ,attention_mask=input_mask )['logits']
            return outputs
@require_tf
@require_keras_nlp
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
    def setUp( self ):
        super().setUp()

        self.tokenizers = [GPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in (TOKENIZER_CHECKPOINTS)]
        self.tf_tokenizers = [TFGPTaTokenizer.from_pretrained(checkpoint ) for checkpoint in TOKENIZER_CHECKPOINTS]
        assert len(self.tokenizers ) == len(self.tf_tokenizers )

        self.test_sentences = [
            """This is a straightforward English test sentence.""",
            """This one has some weird characters\rto\nsee\r\nif those\u00E9break things.""",
            """Now we're going to add some Chinese: 一 二 三 一二三""",
            """And some much more rare Chinese: 齉 堃 齉堃""",
            """Je vais aussi écrire en français pour tester les accents""",
            """Classical Irish also has some unusual characters, so in they go: Gaelaċ, ꝼ""",
        ]
        self.paired_sentences = list(zip(self.test_sentences ,self.test_sentences[::-1] ) )

    def test_output_equivalence( self ):
        for tokenizer, tf_tokenizer in zip(self.tokenizers ,self.tf_tokenizers ):
            for test_inputs in self.test_sentences:
                python_outputs = tokenizer([test_inputs] ,return_tensors='tf' )
                tf_outputs = tf_tokenizer([test_inputs] )

                for key in python_outputs.keys():
                    # convert them to numpy to avoid messing with ragged tensors
                    python_outputs_values = python_outputs[key].numpy()
                    tf_outputs_values = tf_outputs[key].numpy()

                    self.assertTrue(tf.reduce_all(python_outputs_values.shape == tf_outputs_values.shape ) )
                    self.assertTrue(tf.reduce_all(tf.cast(python_outputs_values ,tf.int64 ) == tf_outputs_values ) )

    @slow
    def test_graph_mode( self ):
        for tf_tokenizer in self.tf_tokenizers:
            compiled_tokenizer = tf.function(tf_tokenizer )
            for test_inputs in self.test_sentences:
                test_inputs = tf.constant(test_inputs )
                compiled_outputs = compiled_tokenizer(test_inputs )
                eager_outputs = tf_tokenizer(test_inputs )

                for key in eager_outputs.keys():
                    self.assertTrue(tf.reduce_all(eager_outputs[key] == compiled_outputs[key] ) )

    @slow
    def test_saved_model( self ):
        for tf_tokenizer in self.tf_tokenizers:
            model = ModelToSave(tokenizer=tf_tokenizer )
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = model.serving(test_inputs )  # Build model with some sample inputs
            with TemporaryDirectory() as tempdir:
                save_path = Path(tempdir ) / 'saved.model'
                tf.saved_model.save(model ,save_path ,signatures={'serving_default': model.serving} )
                loaded_model = tf.saved_model.load(save_path )
            loaded_output = loaded_model.signatures['serving_default'](test_inputs )['output_0']
            # We may see small differences because the loaded model is compiled, so we need an epsilon for the test
            self.assertTrue(tf.reduce_all(out == loaded_output ) )

    @slow
    def test_from_config( self ):
        for tf_tokenizer in self.tf_tokenizers:
            test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
            out = tf_tokenizer(test_inputs )  # Build model with some sample inputs
            config = tf_tokenizer.get_config()
            model_from_config = TFGPTaTokenizer.from_config(config )
            from_config_output = model_from_config(test_inputs )

            for key in from_config_output.keys():
                self.assertTrue(tf.reduce_all(from_config_output[key] == out[key] ) )

    @slow
    def test_padding( self ):
        for tf_tokenizer in self.tf_tokenizers:
            # assign a pad token id so the padding test can run
            tf_tokenizer.pad_token_id = 123123

            for max_length in [3, 5, 1024]:
                test_inputs = tf.convert_to_tensor([self.test_sentences[0]] )
                out = tf_tokenizer(test_inputs ,max_length=max_length )

                out_length = out['input_ids'].numpy().shape[1]

                assert out_length == max_length
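
    # Hedged note (illustrative, mirroring `serving` above, which treats id 0 as
    # padding because to_tensor() pads ragged rows with zeros): exported
    # tokenizer+model graphs densify and mask in two steps:
    #   input_ids = tokenized['input_ids'].to_tensor()      # ragged -> dense, 0-padded
    #   attention_mask = tf.cast(input_ids > 0, tf.int32)   # 1 on real tokens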
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
modified_files = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()

joined_dirs = '''|'''.join(sys.argv[1:])
regex = re.compile(rF"""^({joined_dirs}).*?\.py$""")

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
"""simple docstring"""
def valid_coloring(neighbours: list ,colored_vertices: list ,color: int ) -> bool:
    # A color is usable when no already-colored neighbour shares it.
    return not any(
        neighbour == 1 and colored_vertices[i] == color
        for i, neighbour in enumerate(neighbours ) )


def util_color(graph: list ,max_colors: int ,colored_vertices: list ,index: int ) -> bool:
    # Base Case: every vertex received a color
    if index == len(graph ):
        return True

    # Recursive Step
    for i in range(max_colors ):
        if valid_coloring(graph[index] ,colored_vertices ,i ):
            # Color current vertex
            colored_vertices[index] = i
            # Validate coloring
            if util_color(graph ,max_colors ,colored_vertices ,index + 1 ):
                return True
            # Backtrack
            colored_vertices[index] = -1
    return False


def color(graph: list ,max_colors: int ) -> list:
    colored_vertices = [-1] * len(graph )
    if util_color(graph ,max_colors ,colored_vertices ,0 ):
        return colored_vertices
    return []
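

if __name__ == "__main__":
    # Hedged example (assumed sample data): 3-color a 5-vertex graph given as an
    # adjacency matrix. Vertex 1 touches 0, 2 and 4; vertex 2 also touches 3.
    graph = [
        [0, 1, 0, 0, 0],
        [1, 0, 1, 0, 1],
        [0, 1, 0, 1, 0],
        [0, 0, 1, 0, 0],
        [0, 1, 0, 0, 0],
    ]
    print(color(graph, 3))  # first valid assignment found: [0, 1, 0, 1, 0]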
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    '''simple docstring'''

    def __init__( self ) -> None:
        self.node_position = []

    def get_position( self ,vertex ):
        return self.node_position[vertex]

    def set_position( self ,vertex ,pos ):
        self.node_position[vertex] = pos

    def top_to_bottom( self ,heap ,start ,size ,positions ):
        # Sift the value at `start` down until the min-heap property holds.
        if start > size // 2 - 1:
            return
        if 2 * start + 2 >= size:
            smallest_child = 2 * start + 1
        elif heap[2 * start + 1] < heap[2 * start + 2]:
            smallest_child = 2 * start + 1
        else:
            smallest_child = 2 * start + 2
        if heap[smallest_child] < heap[start]:
            temp, temp_position = heap[smallest_child], positions[smallest_child]
            heap[smallest_child], positions[smallest_child] = (
                heap[start],
                positions[start],
            )
            heap[start], positions[start] = temp, temp_position

            temp = self.get_position(positions[smallest_child] )
            self.set_position(
                positions[smallest_child] ,self.get_position(positions[start] ) )
            self.set_position(positions[start] ,temp )

            self.top_to_bottom(heap ,smallest_child ,size ,positions )

    def bottom_to_top( self ,val ,index ,heap ,position ):
        # Bubble the value at `index` up toward the root.
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent] ,index )
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp ,index )
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp ,0 )

    def heapify( self ,heap ,positions ):
        start = len(heap ) // 2 - 1
        for i in range(start ,-1 ,-1 ):
            self.top_to_bottom(heap ,i ,len(heap ) ,positions )

    def delete_minimum( self ,heap ,positions ):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap ,0 ,len(heap ) ,positions )
        return temp


def prisms_algorithm(adjacency_list ):
    heap = Heap()

    visited = [0] * len(adjacency_list )
    nbr_tv = [-1] * len(adjacency_list )  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list ) ):
        distance_tv.append(sys.maxsize )
        positions.append(vertex )
        heap.node_position.append(vertex )

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv ,positions )

    for _ in range(1 ,len(adjacency_list ) ):
        vertex = heap.delete_minimum(distance_tv ,positions )
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex) )
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor )]
                ):
                    distance_tv[heap.get_position(neighbor )] = distance
                    heap.bottom_to_top(
                        distance ,heap.get_position(neighbor ) ,distance_tv ,positions )
                    nbr_tv[neighbor] = vertex
    return tree_edges


if __name__ == "__main__":  # pragma: no cover
    # < --------- Prims Algorithm --------- >
    edges_number = int(input('Enter number of edges: ' ).strip() )
    adjacency_list = defaultdict(list )
    for _ in range(edges_number ):
        edge = [int(x ) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]] )
        adjacency_list[edge[1]].append([edge[0], edge[2]] )

    print(prisms_algorithm(adjacency_list ) )
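
    # Hedged example session (assumed sample input): for the triangle graph with
    # edges 0-1 (weight 1), 1-2 (weight 3) and 0-2 (weight 2), enter "3", then
    # "0 1 1", "1 2 3" and "0 2 2"; the minimum spanning tree printed is
    # [(0, 1), (0, 2)].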
"""simple docstring"""
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator ,batch_size: int = 16 ,model_name: str = 'bert-base-cased' ):
    tokenizer = AutoTokenizer.from_pretrained(model_name )
    datasets = load_dataset('glue' ,'mrpc' )

    def tokenize_function(examples ):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples['sentence1'] ,examples['sentence2'] ,truncation=True ,max_length=None )
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function ,batched=True ,remove_columns=['idx', 'sentence1', 'sentence2'] ,load_from_cache_file=False )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column('label' ,'labels' )

    def collate_fn(examples ):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples ,padding='max_length' ,max_length=128 ,return_tensors='pt' )
        return tokenizer.pad(examples ,padding='longest' ,return_tensors='pt' )

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets['train'] ,shuffle=True ,collate_fn=collate_fn ,batch_size=batch_size )
    eval_dataloader = DataLoader(
        tokenized_datasets['validation'] ,shuffle=False ,collate_fn=collate_fn ,batch_size=EVAL_BATCH_SIZE )

    return train_dataloader, eval_dataloader


def training_function(config ,args ):
    # Initialize accelerator
    accelerator = Accelerator()

    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config['lr']
    num_epochs = int(config['num_epochs'] )
    seed = int(config['seed'] )
    batch_size = int(config['batch_size'] )
    model_name = args.model_name_or_path

    set_seed(seed )
    train_dataloader, eval_dataloader = get_dataloaders(accelerator ,batch_size ,model_name )

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name ,return_dict=True )

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or 'optimizer' not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters() ,lr=lr )

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            'gradient_accumulation_steps'
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader ) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or 'scheduler' not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer ,num_warmup_steps=0 ,num_training_steps=max_training_steps ,)
    else:
        lr_scheduler = DummyScheduler(optimizer ,total_num_steps=max_training_steps ,warmup_num_steps=0 )

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model ,optimizer ,train_dataloader ,eval_dataloader ,lr_scheduler )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0

    # Now we train the model
    metric = evaluate.load('glue' ,'mrpc' )
    best_performance = 0
    performance_metric = {}
    for epoch in range(starting_epoch ,num_epochs ):
        model.train()
        for step, batch in enumerate(train_dataloader ):
            outputs = model(**batch )
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss )
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        model.eval()
        samples_seen = 0
        for step, batch in enumerate(eval_dataloader ):
            # We could avoid this line since we set the accelerator with `device_placement=True`.
            batch.to(accelerator.device )
            with torch.no_grad():
                outputs = model(**batch )
            predictions = outputs.logits.argmax(dim=-1 )
            # It is slightly faster to call this once, than multiple times
            predictions, references = accelerator.gather(
                (predictions, batch['labels']) )  # If we are in a multiprocess environment, the last batch has duplicates
            if accelerator.use_distributed:
                if step == len(eval_dataloader ) - 1:
                    predictions = predictions[: len(eval_dataloader.dataset ) - samples_seen]
                    references = references[: len(eval_dataloader.dataset ) - samples_seen]
                else:
                    samples_seen += references.shape[0]
            metric.add_batch(
                predictions=predictions ,references=references ,)

        eval_metric = metric.compute()
        # Use accelerator.print to print only on the main process.
        accelerator.print(F'epoch {epoch}:' ,eval_metric )
        performance_metric[F'epoch-{epoch}'] = eval_metric['accuracy']

        if best_performance < eval_metric['accuracy']:
            best_performance = eval_metric['accuracy']

    if args.performance_lower_bound is not None:
        assert (
            args.performance_lower_bound <= best_performance
        ), F'Best performance metric {best_performance} is lower than the lower bound {args.performance_lower_bound}'

    accelerator.wait_for_everyone()
    if accelerator.is_main_process:
        with open(os.path.join(args.output_dir ,'all_results.json' ) ,'w' ) as f:
            json.dump(performance_metric ,f )


def main():
    parser = argparse.ArgumentParser(description='Simple example of training script tracking peak GPU memory usage.' )
    parser.add_argument(
        '--model_name_or_path' ,type=str ,default='bert-base-cased' ,help='Path to pretrained model or model identifier from huggingface.co/models.' ,required=False ,)
    parser.add_argument(
        '--output_dir' ,type=str ,default='.' ,help='Optional save directory where all checkpoint folders will be stored. Default is the current working directory.' ,)
    parser.add_argument(
        '--performance_lower_bound' ,type=float ,default=None ,help='Optional lower bound for the performance metric. If set, the training will throw error when the performance metric drops below this value.' ,)
    parser.add_argument(
        '--num_epochs' ,type=int ,default=3 ,help='Number of train epochs.' ,)
    args = parser.parse_args()
    config = {'lr': 2e-5, 'num_epochs': args.num_epochs, 'seed': 42, 'batch_size': 16}
    training_function(config ,args )
if __name__ == "__main__":
main()
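

# Hedged usage note (assumption: the config file name below is illustrative):
# a script like this is usually launched through Accelerate so the DeepSpeed
# plugin and its optional optimizer/scheduler sections are picked up, e.g.
#   accelerate launch --config_file deepspeed_config.yaml this_script.py \
#       --model_name_or_path bert-base-cased --num_epochs 3 --output_dir out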
| 717 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( snake_case__ : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class AddNewModelCommand( BaseTransformersCLICommand ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, 'w') as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)
            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.')
            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)
        def skip_units(line: str):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
        def replace_in_files(path_to_datafile: str):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if "# To replace in: " in line and "##" not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif "# Below: " in line and "##" not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif "# End." in line and "##" not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif "# Replace with" in line and "##" not in line:
                        lines_to_copy = []
                    elif "##" not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
        os.rmdir(directory)
"""simple docstring"""
def actual_power(a: int, b: int) -> int:
    # Divide-and-conquer exponentiation. Both halves are recomputed, so this still
    # performs O(b) multiplications; see the memoized sketch below the main guard.
    if b == 0:
        return 1
    if (b % 2) == 0:
        return actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
    else:
        return a * actual_power(a, int(b / 2)) * actual_power(a, int(b / 2))
def power(a: int, b: int) -> float:
    if b < 0:
        # int(b / 2) truncates toward zero, so the recursion above also terminates
        # for negative exponents and returns a ** abs(b).
        return 1 / actual_power(a, b)
    return actual_power(a, b)
if __name__ == "__main__":
print(power(-2, -3))
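# A refinement sketch (assumption: not part of the original file). Caching the half-power
# avoids the duplicated recursive call in actual_power and brings the cost down to
# O(log b) multiplications. Assumes a non-negative exponent.
def fast_power(a: int, b: int) -> int:
    if b == 0:
        return 1
    half = fast_power(a, b // 2)  # compute the half-power once and reuse it
    return half * half if b % 2 == 0 else a * half * half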
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
'''simple docstring'''
    def __init__(self, parent, batch_size=7, num_channels=3, image_size=18, min_resolution=30, max_resolution=400, do_resize=True, size=None, do_normalize=True):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    '''simple docstring'''
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None
    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)
    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()
    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'clusters'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))
    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})
    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)
    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'image_processor.json')
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)
        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()
        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == "clusters":
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)
    @unittest.skip('ImageGPT requires clusters at initialization')
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils', split='test')
    image1 = Image.open(dataset[4]['file'])
    image2 = Image.open(dataset[5]['file'])
    images = [image1, image2]
    return images
@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
        images = prepare_images()
        # test non-batched
        encoding = image_processing(images[0], return_tensors='pt')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)
        # test batched
        encoding = image_processing(images, return_tensors='pt')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
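# Usage sketch (mirrors the integration test above): the processor color-quantizes every
# pixel against its `clusters` palette, so each 32x32 image becomes a sequence of 1024
# token ids that ImageGPT consumes directly.
#   processor = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
#   input_ids = processor(images, return_tensors='pt').input_ids  # shape (batch, 1024)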
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100'
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            job_links.update({job['name']: job['html_url'] for job in result['jobs']})
        return job_links
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    url = f'https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100'
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        pages_to_iterate_over = math.ceil((result['total_count'] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f'&page={i + 2}', headers=headers).json()
            artifacts.update({artifact['name']: artifact['archive_download_url'] for artifact in result['artifacts']})
        return artifacts
    except Exception:
        print(f'Unknown error, could not fetch links:\n{traceback.format_exc()}')
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    headers = None
    if token is not None:
        headers = {'Accept': 'application/vnd.github+json', 'Authorization': f'Bearer {token}'}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers['Location']
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f'{artifact_name}.zip')
    with open(file_path, 'wb') as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode('UTF-8').strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(': ')]
                                    error = line[line.index(': ') + len(': ') :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith('FAILED '):
                                # `test` is the test method that failed
                                test = line[len('FAILED ') :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f'`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` '
            f'and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some'
            ' problem.')
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith('.zip')]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {'count': count, 'failed_tests': [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    test = test.split('::')[0]
    if test.startswith('tests/models/'):
        test = test.split('/')[2]
    else:
        test = None
    return test
def reduce_by_model(logs, error_filter=None):
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {'count': n_errors, 'errors': error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    header = '| no. | error | status |'
    sep = '|-:|:-|:-|'
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]['count']
        line = f'| {count} | {error[:100]} | |'
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    header = '| model | no. of errors | major error | count |'
    sep = '|-:|-:|-:|-:|'
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]['count']
        error, _count = list(reduced_by_model[model]['errors'].items())[0]
        line = f'| {model} | {count} | {error[:60]} | {_count} |'
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
    args = parser.parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / "):]
            job_links[k] = v
    with open(os.path.join(args.output_dir, 'job_links.json'), 'w', encoding='UTF-8') as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)
    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, 'artifacts.json'), 'w', encoding='UTF-8') as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)
    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)
    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, 'errors.json'), 'w', encoding='UTF-8') as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)
    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, 'reduced_by_error.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, 'reduced_by_model.txt'), 'w', encoding='UTF-8') as fp:
        fp.write(s2)
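# Example invocation (assumed script name; the flags match the argparse setup above):
#   python get_ci_error_statistics.py --workflow_run_id 1234567890 \
#       --output_dir ci_reports --token "$GITHUB_TOKEN"
# The run leaves job_links.json, artifacts.json and errors.json in the output directory,
# plus the two GitHub-flavored markdown tables reduced_by_error.txt and reduced_by_model.txt.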
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)
class DownloadCommand(BaseTransformersCLICommand):
    '''simple docstring'''
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models')
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir')
        download_parser.add_argument(
            '--trust-remote-code', action='store_true', help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine')
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)
    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code
    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer
        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code)
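# Usage sketch (assumed CLI entry point; mirrors the argparse setup above):
#   transformers-cli download bert-base-uncased --cache-dir ~/models --force
# This pre-fetches both the model weights and the tokenizer into the local cache.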
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from transformers.utils import is_vision_available
from transformers.utils.generic import TensorType
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import logging
if is_vision_available():
import PIL
_lowercase = logging.get_logger(__name__)
def make_batched(videos):
    if isinstance(videos, (list, tuple)) and isinstance(videos[0], (list, tuple)) and is_valid_image(videos[0][0]):
        return videos
    elif isinstance(videos, (list, tuple)) and is_valid_image(videos[0]):
        return [videos]
    elif is_valid_image(videos):
        return [[videos]]
    raise ValueError(f'Could not make batched video from {videos}')
class lowerCAmelCase_ ( _a ):
'''simple docstring'''
_lowerCamelCase: Tuple = ["""pixel_values"""]
def __init__( self : str ,A_ : List[Any] = True ,A_ : List[Any] = None ,A_ : Union[str, Any] = PILImageResampling.BILINEAR ,A_ : Tuple = True ,A_ : List[str] = None ,A_ : Optional[Any] = True ,A_ : Dict = 1 / 255 ,A_ : List[str] = True ,A_ : Union[str, Any] = True ,A_ : Dict = None ,A_ : int = None ,**A_ : int ,) -> None:
super().__init__(**snake_case_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(snake_case_ ,default_to_square=snake_case_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(snake_case_ ,param_name='crop_size' )
A = do_resize
A = size
A = do_center_crop
A = crop_size
A = resample
A = do_rescale
A = rescale_factor
A = offset
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Dict = PILImageResampling.BILINEAR ,A_ : Tuple = None ,**A_ : Union[str, Any] ,) -> np.ndarray:
A = get_size_dict(snake_case_ ,default_to_square=snake_case_ )
if "shortest_edge" in size:
A = get_resize_output_image_size(snake_case_ ,size['shortest_edge'] ,default_to_square=snake_case_ )
elif "height" in size and "width" in size:
A = (size['height'], size['width'])
else:
raise ValueError(F'Size must have \'height\' and \'width\' or \'shortest_edge\' as keys. Got {size.keys()}' )
return resize(snake_case_ ,size=snake_case_ ,resample=snake_case_ ,data_format=snake_case_ ,**snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Union[str, Any] ,A_ : Any = None ,**A_ : Optional[Any] ,) -> np.ndarray:
A = get_size_dict(snake_case_ )
if "height" not in size or "width" not in size:
raise ValueError(F'Size must have \'height\' and \'width\' as keys. Got {size.keys()}' )
return center_crop(snake_case_ ,size=(size['height'], size['width']) ,data_format=snake_case_ ,**snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : str ,A_ : Dict = True ,A_ : int = None ,**A_ : List[Any] ,) -> str:
        image = image.astype(np.float32)
        if offset:
            image = image - (scale / 2)
        return rescale(image, scale=scale, data_format=data_format, **kwargs)
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ,A_ : int ,A_ : int ,A_ : Optional[int] = None ,**A_ : Optional[int] ,) -> np.ndarray:
return normalize(snake_case_ ,mean=snake_case_ ,std=snake_case_ ,data_format=snake_case_ ,**snake_case_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ,A_ : Tuple = None ,A_ : Optional[Any] = None ,A_ : Dict = None ,A_ : Tuple = None ,A_ : Dict = None ,A_ : Optional[int] = None ,A_ : Dict = None ,A_ : List[str] = None ,A_ : Any = None ,A_ : List[Any] = None ,A_ : Union[str, Any] = None ,A_ : Dict = ChannelDimension.FIRST ,) -> np.ndarray:
        if do_resize and (size is None or resample is None):
            raise ValueError('Size and resample must be specified if do_resize is True.')
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
if offset and not do_rescale:
raise ValueError('For offset, do_rescale must also be set to True.' )
# All transformations expect numpy arrays.
A = to_numpy_array(snake_case_ )
if do_resize:
A = self.resize(image=snake_case_ ,size=snake_case_ ,resample=snake_case_ )
if do_center_crop:
A = self.center_crop(snake_case_ ,size=snake_case_ )
if do_rescale:
A = self.rescale(image=snake_case_ ,scale=snake_case_ ,offset=snake_case_ )
if do_normalize:
A = self.normalize(image=snake_case_ ,mean=snake_case_ ,std=snake_case_ )
A = to_channel_dimension_format(snake_case_ ,snake_case_ )
return image
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ,A_ : str = None ,A_ : Optional[Any] = None ,A_ : Dict = None ,A_ : str = None ,A_ : Any = None ,A_ : Optional[Any] = None ,A_ : Optional[Any] = None ,A_ : Tuple = None ,A_ : List[Any] = None ,A_ : Any = None ,A_ : List[Any] = None ,A_ : int = None ,A_ : str = ChannelDimension.FIRST ,**A_ : Any ,) -> PIL.Image.Image:
A = do_resize if do_resize is not None else self.do_resize
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = offset if offset is not None else self.offset
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = size if size is not None else self.size
A = get_size_dict(snake_case_ ,default_to_square=snake_case_ )
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(snake_case_ ,param_name='crop_size' )
if not valid_images(snake_case_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
A = make_batched(snake_case_ )
A = [
[
self._preprocess_image(
image=snake_case_ ,do_resize=snake_case_ ,size=snake_case_ ,resample=snake_case_ ,do_center_crop=snake_case_ ,crop_size=snake_case_ ,do_rescale=snake_case_ ,rescale_factor=snake_case_ ,offset=snake_case_ ,do_normalize=snake_case_ ,image_mean=snake_case_ ,image_std=snake_case_ ,data_format=snake_case_ ,)
for img in video
]
for video in videos
]
        data = {'pixel_values': videos}
        return BatchFeature(data=data, tensor_type=return_tensors)
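# Usage sketch (hypothetical names: the concrete exported class and default sizes depend on
# the model this processor ships with, which is not shown in this file):
#   processor = SomeVideoImageProcessor()             # e.g. a VideoMAE-style video processor
#   batch = processor(list_of_pil_frames, return_tensors='pt')
#   batch['pixel_values'].shape                       # (1, num_frames, 3, 224, 224)
# Each frame is resized, center-cropped, rescaled (optionally offset below zero) and normalized.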
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'spm_char.model'}
PRETRAINED_VOCAB_FILES_MAP = {
    'vocab_file': {
        'microsoft/speecht5_asr': 'https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model',
        'microsoft/speecht5_tts': 'https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model',
        'microsoft/speecht5_vc': 'https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model',
    }
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    'microsoft/speecht5_asr': 1024,
    'microsoft/speecht5_tts': 1024,
    'microsoft/speecht5_vc': 1024,
}
class SpeechT5Tokenizer(PreTrainedTokenizer):
    '''simple docstring'''
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ['input_ids', 'attention_mask']
    def __init__(self, vocab_file, bos_token="<s>", eos_token="</s>", unk_token="<unk>", pad_token="<pad>", sp_model_kwargs: Optional[Dict[str, Any]] = None, **kwargs) -> None:
        self.sp_model_kwargs = {} if sp_model_kwargs is None else sp_model_kwargs
        super().__init__(
            bos_token=bos_token, eos_token=eos_token, unk_token=unk_token, pad_token=pad_token, sp_model_kwargs=self.sp_model_kwargs, **kwargs)
        self.vocab_file = vocab_file
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(vocab_file)
    @property
    def vocab_size(self):
        return self.sp_model.get_piece_size()
    def get_vocab(self):
        vocab = {self.convert_ids_to_tokens(i): i for i in range(self.vocab_size)}
        vocab.update(self.added_tokens_encoder)
        return vocab
    def __getstate__(self):
        state = self.__dict__.copy()
        state['sp_model'] = None
        return state
    def __setstate__(self, d):
        self.__dict__ = d
        # for backward compatibility
        if not hasattr(self, 'sp_model_kwargs'):
            self.sp_model_kwargs = {}
        self.sp_model = spm.SentencePieceProcessor(**self.sp_model_kwargs)
        self.sp_model.Load(self.vocab_file)
    def _tokenize(self, text: str) -> List[str]:
        return self.sp_model.encode(text, out_type=str)
    def _convert_token_to_id(self, token):
        return self.sp_model.piece_to_id(token)
    def _convert_id_to_token(self, index):
        token = self.sp_model.IdToPiece(index)
        return token
    def convert_tokens_to_string(self, tokens):
        current_sub_tokens = []
        out_string = ''
        for token in tokens:
            # make sure that special tokens are not decoded using sentencepiece model
            if token in self.all_special_tokens:
                out_string += self.sp_model.decode(current_sub_tokens) + token
                current_sub_tokens = []
            else:
                current_sub_tokens.append(token)
        out_string += self.sp_model.decode(current_sub_tokens)
        return out_string.strip()
    def build_inputs_with_special_tokens(self, token_ids_0, token_ids_1=None) -> List[int]:
        if token_ids_1 is None:
            return token_ids_0 + [self.eos_token_id]
        # We don't expect to process pairs, but leave the pair logic for API consistency
        return token_ids_0 + token_ids_1 + [self.eos_token_id]
    def get_special_tokens_mask(self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None, already_has_special_tokens: bool = False) -> List[int]:
        if already_has_special_tokens:
            return super().get_special_tokens_mask(
                token_ids_0=token_ids_0, token_ids_1=token_ids_1, already_has_special_tokens=True)
        suffix_ones = [1]
        if token_ids_1 is None:
            return ([0] * len(token_ids_0)) + suffix_ones
        return ([0] * len(token_ids_0)) + ([0] * len(token_ids_1)) + suffix_ones
    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f'Vocabulary path ({save_directory}) should be a directory')
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'])
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file) and os.path.isfile(self.vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        elif not os.path.isfile(self.vocab_file):
            with open(out_vocab_file, 'wb') as fi:
                content_spiece_model = self.sp_model.serialized_model_proto()
                fi.write(content_spiece_model)
        return (out_vocab_file,)
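# Usage sketch (the checkpoint names come from the PRETRAINED_VOCAB_FILES_MAP above):
#   tok = SpeechT5Tokenizer.from_pretrained('microsoft/speecht5_tts')
#   ids = tok('Hello world').input_ids  # character-level SentencePiece ids ending in </s>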
"""simple docstring"""
from sklearn.metrics import matthews_corrcoef
import datasets
_DESCRIPTION = '''
Compute the Matthews correlation coefficient (MCC)
The Matthews correlation coefficient is used in machine learning as a
measure of the quality of binary and multiclass classifications. It takes
into account true and false positives and negatives and is generally
regarded as a balanced measure which can be used even if the classes are of
very different sizes. The MCC is in essence a correlation coefficient value
between -1 and +1. A coefficient of +1 represents a perfect prediction, 0
an average random prediction and -1 an inverse prediction. The statistic
is also known as the phi coefficient. [source: Wikipedia]
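For binary classification it can be computed directly from the confusion matrix as
MCC = (TP*TN - FP*FN) / sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)).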
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (list of int): Predicted labels, as returned by a model.
references (list of int): Ground truth labels.
sample_weight (list of int, float, or bool): Sample weights. Defaults to `None`.
Returns:
matthews_correlation (dict containing float): Matthews correlation.
Examples:
Example 1, a basic example with only predictions and references as inputs:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3])
>>> print(round(results[\'matthews_correlation\'], 2))
0.54
Example 2, the same example as above, but also including sample weights:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 3, 1, 1, 1, 2])
>>> print(round(results[\'matthews_correlation\'], 2))
0.1
Example 3, the same example as above, but with sample weights that cause a negative correlation:
>>> matthews_metric = datasets.load_metric("matthews_correlation")
>>> results = matthews_metric.compute(references=[1, 3, 2, 0, 3, 2],
... predictions=[1, 2, 2, 0, 3, 3],
... sample_weight=[0.5, 1, 0, 0, 0, 1])
>>> print(round(results[\'matthews_correlation\'], 2))
-0.25
'''
_CITATION = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class MatthewsCorrelation(datasets.Metric):
    '''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('int32' ),
'references': datasets.Value('int32' ),
} ) ,reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.matthews_corrcoef.html'
] ,)
    def _compute(self, predictions, references, sample_weight=None):
        return {
            "matthews_correlation": float(matthews_corrcoef(references, predictions, sample_weight=sample_weight)),
        }
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_clip_fast"] = ["CLIPTokenizerFast"]
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_clip"] = ["CLIPFeatureExtractor"]
    _import_structure["image_processing_clip"] = ["CLIPImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_clip"] = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_clip"] = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_clip"] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
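# Lazy-import behavior (sketch): any name listed in _import_structure resolves on first
# attribute access, so importing the package stays cheap and e.g.
#   from transformers.models.clip import CLIPModel
# only pulls in the torch-backed modeling code at that point.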
"""simple docstring"""
def solution(limit: int = 1_000_000) -> int:
    # phi[i] ends up holding Euler's totient of i; start from the upper bound i - 1.
    phi = [i - 1 for i in range(limit + 1)]
    for i in range(2, limit + 1):
        if phi[i] == i - 1:  # i is prime, sieve its multiples
            for j in range(2 * i, limit + 1, i):
                phi[j] -= phi[j] // i
    # number of reduced proper fractions with denominator <= limit
    return sum(phi[2 : limit + 1])
if __name__ == "__main__":
    print(solution())
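# Sanity check (Project Euler problem 72 states there are 21 reduced proper fractions
# for denominators d <= 8):
#   assert solution(8) == 21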
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class XLMModelTester:
'''simple docstring'''
    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_input_lengths=True, use_token_type_ids=True, use_labels=True, gelu_activation=True, sinusoidal_embeddings=False, causal=False, asm=False, n_langs=2, vocab_size=99, n_special=0, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=512, type_sequence_label_size=2, initializer_range=0.02, num_labels=2, num_choices=4, summary_type="last", use_proj=True, scope=None, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_lengths = use_input_lengths
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.gelu_activation = gelu_activation
        self.sinusoidal_embeddings = sinusoidal_embeddings
        self.causal = causal
        self.asm = asm
        self.n_langs = n_langs
        self.vocab_size = vocab_size
        self.n_special = n_special
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.summary_type = summary_type
        self.use_proj = use_proj
        self.scope = scope
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)
        input_mask = random_attention_mask([self.batch_size, self.seq_length])
        input_lengths = None
        if self.use_input_lengths:
            input_lengths = (
                ids_tensor([self.batch_size], vocab_size=2) + self.seq_length - 2
            )  # small variation of seq_length
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.n_langs)
        sequence_labels = None
        token_labels = None
        is_impossible_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            is_impossible_labels = ids_tensor([self.batch_size], 2).float()
            choice_labels = ids_tensor([self.batch_size], self.num_choices)
        config = self.get_config()
        return (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        )
    def get_config(self):
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
    def create_and_check_xlm_model(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, lengths=input_lengths, langs=token_type_ids)
        result = model(input_ids, langs=token_type_ids)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_xlm_lm_head(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMWithLMHeadModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, token_type_ids=token_type_ids, labels=token_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))
    def create_and_check_xlm_simple_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnsweringSimple(config)
        model.to(torch_device)
        model.eval()
        outputs = model(input_ids)
        outputs = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        result = outputs
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def create_and_check_xlm_qa(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForQuestionAnswering(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels, p_mask=input_mask)
        result_with_labels = model(
            input_ids, start_positions=sequence_labels, end_positions=sequence_labels, cls_index=sequence_labels, is_impossible=is_impossible_labels)
        (total_loss,) = result_with_labels.to_tuple()
        result_with_labels = model(input_ids, start_positions=sequence_labels, end_positions=sequence_labels)
        (total_loss,) = result_with_labels.to_tuple()
        self.parent.assertEqual(result_with_labels.loss.shape, ())
        self.parent.assertEqual(result.start_top_log_probs.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(result.start_top_index.shape, (self.batch_size, model.config.start_n_top))
        self.parent.assertEqual(
            result.end_top_log_probs.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(
            result.end_top_index.shape, (self.batch_size, model.config.start_n_top * model.config.end_n_top))
        self.parent.assertEqual(result.cls_logits.shape, (self.batch_size,))
    def create_and_check_xlm_sequence_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        model = XLMForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids)
        result = model(input_ids, labels=sequence_labels)
        self.parent.assertEqual(result.loss.shape, ())
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_xlm_token_classif(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_labels = self.num_labels
        model = XLMForTokenClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))
    def create_and_check_xlm_for_multiple_choice(self, config, input_ids, token_type_ids, input_lengths, sequence_labels, token_labels, is_impossible_labels, choice_labels, input_mask):
        config.num_choices = self.num_choices
        model = XLMForMultipleChoice(config=config)
        model.to(torch_device)
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1).expand(-1, self.num_choices, -1).contiguous()
        result = model(
            multiple_choice_inputs_ids, attention_mask=multiple_choice_input_mask, token_type_ids=multiple_choice_token_type_ids, labels=choice_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_lengths,
            sequence_labels,
            token_labels,
            is_impossible_labels,
            choice_labels,
            input_mask,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
        return config, inputs_dict
@require_torch
class XLMModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
    all_generative_model_classes = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
    pipeline_model_mapping = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
    def is_pipeline_test_to_skip(self, pipeline_test_casse_name, config_class, model_architecture, tokenizer_name, processor_name):
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
    def _prepare_for_class(self, inputs_dict, model_class, return_labels=False):
        inputs_dict = super()._prepare_for_class(inputs_dict, model_class, return_labels=return_labels)
        if return_labels:
            if model_class.__name__ == "XLMForQuestionAnswering":
                inputs_dict['start_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
                inputs_dict['end_positions'] = torch.zeros(
                    self.model_tester.batch_size, dtype=torch.long, device=torch_device)
return inputs_dict
    def setUp(self):
        self.model_tester = XLMModelTester(self)
        self.config_tester = ConfigTester(self, config_class=XLMConfig, emb_dim=37)
    def test_config(self):
        self.config_tester.run_common_tests()
    def test_xlm_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_model(*config_and_inputs)
    def test_xlm_lm_head(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_lm_head(*config_and_inputs)
    def test_xlm_simple_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_simple_qa(*config_and_inputs)
    def test_xlm_qa(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_qa(*config_and_inputs)
    def test_xlm_sequence_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_sequence_classif(*config_and_inputs)
    def test_xlm_token_classif(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_token_classif(*config_and_inputs)
    def test_xlm_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_xlm_for_multiple_choice(*config_and_inputs)
    def _check_attentions_for_generate(self, batch_size, attentions, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(attentions, tuple)
        self.assertListEqual(
            [isinstance(iter_attentions, tuple) for iter_attentions in attentions], [True] * len(attentions))
        self.assertEqual(len(attentions), (max_length - min_length) * num_beam_groups)
        for idx, iter_attentions in enumerate(attentions):
            # adds PAD dummy token
            tgt_len = min_length + idx + 1
            src_len = min_length + idx + 1
            expected_shape = (
                batch_size * num_beam_groups,
                config.num_attention_heads,
                tgt_len,
                src_len,
            )
            # check attn size
            self.assertListEqual(
                [layer_attention.shape for layer_attention in iter_attentions], [expected_shape] * len(iter_attentions))
    def _check_hidden_states_for_generate(self, batch_size, hidden_states, min_length, max_length, config, use_cache=False, num_beam_groups=1):
        self.assertIsInstance(hidden_states, tuple)
        self.assertListEqual(
            [isinstance(iter_hidden_states, tuple) for iter_hidden_states in hidden_states], [True] * len(hidden_states))
        self.assertEqual(len(hidden_states), (max_length - min_length) * num_beam_groups)
        for idx, iter_hidden_states in enumerate(hidden_states):
            # adds PAD dummy token
            seq_len = min_length + idx + 1
            expected_shape = (batch_size * num_beam_groups, seq_len, config.hidden_size)
            # check hidden size
            self.assertListEqual(
                [layer_hidden_states.shape for layer_hidden_states in iter_hidden_states], [expected_shape] * len(iter_hidden_states))
@slow
    def test_model_from_pretrained(self):
        for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = XLMModel.from_pretrained(model_name)
            self.assertIsNotNone(model)
@require_torch
class XLMModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
    def test_lm_generate_xlm(self):
        model = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048')
        model.to(torch_device)
        input_ids = torch.tensor([[14, 447]], dtype=torch.long, device=torch_device)  # the president
        expected_output_ids = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
        output_ids = model.generate(input_ids, do_sample=False)
        self.assertListEqual(output_ids[0].cpu().numpy().tolist(), expected_output_ids)
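# Running only the slow generation check above follows the usual transformers convention
# (assumed invocation):
#   RUN_SLOW=1 pytest tests/models/xlm/test_modeling_xlm.py -k lm_generate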
"""simple docstring"""
from operator import delitem, getitem, setitem
import pytest
from data_structures.hashing.hash_map import HashMap
def _get(k):
    return getitem, k
def _set(k, v):
    return setitem, k, v
def _del(k):
    return delitem, k
def _run_operation(obj, fun, *args):
    try:
        return fun(obj, *args), None
    except Exception as e:
        return None, e
_add_items = (
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
)
_overwrite_items = [
    _set('key_a', 'val_a'),
    _set('key_a', 'val_b'),
]
_delete_items = [
    _set('key_a', 'val_a'),
    _set('key_b', 'val_b'),
    _del('key_a'),
    _del('key_b'),
    _set('key_a', 'val_a'),
    _del('key_a'),
]
_access_absent_items = [
    _get('key_a'),
    _del('key_a'),
    _set('key_a', 'val_a'),
    _del('key_a'),
    _del('key_a'),
    _get('key_a'),
]
_add_with_resize_up = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
]
_add_with_resize_down = [
    *[_set(x, x) for x in range(5)],  # guaranteed upsize
    *[_del(x) for x in range(5)],
    _set('key_a', 'val_b'),
]
@pytest.mark.parametrize(
'operations' , (
pytest.param(_add_items , id='add items' ),
pytest.param(_overwrite_items , id='overwrite items' ),
pytest.param(_delete_items , id='delete items' ),
pytest.param(_access_absent_items , id='access absent items' ),
pytest.param(_add_with_resize_up , id='add with resize up' ),
pytest.param(_add_with_resize_down , id='add with resize down' ),
) , )
def test_hash_map_is_the_same_as_dict(operations):
    my = HashMap(initial_block_size=4)
    py = {}
    for _, (fun, *args) in enumerate(operations):
        my_res, my_exc = _run_operation(my, fun, *args)
        py_res, py_exc = _run_operation(py, fun, *args)
        assert my_res == py_res
        assert str(my) == str(py)
        assert set(my) == set(py)
        assert len(my) == len(py)
        assert set(my.items()) == set(py.items())
def test_no_new_methods():
    def is_public(name: str) -> bool:
        return not name.startswith('_')
    dict_public_names = {name for name in dir({}) if is_public(name)}
    hash_public_names = {name for name in dir(HashMap()) if is_public(name)}
    assert dict_public_names > hash_public_names
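# The parametrized test above is a differential test: every operation sequence is replayed
# against both HashMap and a plain dict, and all observable state (results, str(), iteration,
# length, items) must agree. Extending the table is just another list of helpers, e.g.
# (hypothetical scenario):
#   _mixed_ops = [_set('a', 1), _get('a'), _del('a'), _get('a')]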
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8
def decimal_to_bits(x, bits=BITS):
    # expects an image tensor in [0, 1]; returns per-channel bit planes scaled to {-1, 1}
    device = x.device
    x = (x * 255).int().clamp(0, 255)
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, 'd -> d 1 1')
    x = rearrange(x, 'b c h w -> b c 1 h w')
    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, 'b c d h w -> b (c d) h w')
    bits = bits * 2 - 1
    return bits
def bits_to_decimal(x, bits=BITS):
    # expects a bit tensor (thresholded at 0); returns an image tensor in [0, 1]
    device = x.device
    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
    mask = rearrange(mask, 'd -> d 1 1')
    x = rearrange(x, 'b (c d) h w -> b c d h w', d=8)
    dec = reduce(x * mask, 'b c d h w -> b c h w', 'sum')
    return (dec / 255).clamp(0.0, 1.0)
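# Round-trip sanity sketch (assumed check, not part of the original file): encoding to bit
# planes and decoding back reproduces the 8-bit quantized image exactly.
#   x = torch.rand(1, 3, 8, 8)
#   assert torch.allclose(bits_to_decimal(decimal_to_bits(x)), (x * 255).int() / 255, atol=1e-6)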
def ddim_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, eta: float = 0.0, use_clipped_model_output: bool = True, generator=None, return_dict: bool = True):
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler")
    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"
    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
    beta_prod_t = 1 - alpha_prod_t
    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance ** 0.5
    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else 'cpu'
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise
        prev_sample = prev_sample + variance
    if not return_dict:
        return (prev_sample,)
    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
def ddpm_bit_scheduler_step(self, model_output: torch.FloatTensor, timestep: int, sample: torch.FloatTensor, prediction_type="epsilon", generator=None, return_dict: bool = True):
    t = timestep
    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None
    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev
    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f'Unsupported prediction_type {prediction_type}.')
    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
    pred_prev_sample = pred_prev_sample + variance
    if not return_dict:
        return (pred_prev_sample,)
    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]:
super().__init__()
A = bit_scale
A = (
            ddim_bit_scheduler_step if isinstance(A_ ,DDIMScheduler ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A_ ,scheduler=A_ )
@torch.no_grad()
def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]:
A = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,)
A = decimal_to_bits(A_ ) * self.bit_scale
A = latents.to(self.device )
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
A = self.unet(A_ ,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample
A = bits_to_decimal(A_ )
if output_type == "pil":
A = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
        return ImagePipelineOutput(images=A_ )
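# Note (added): `decimal_to_bits` / `bits_to_decimal` are the bit-diffusion
# helpers that map uint8 images to +/-1 bit planes and back; they are assumed
# to be defined earlier in this file, since the pipeline above calls them.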
"""simple docstring"""
from ....configuration_utils import PretrainedConfig
from ....utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
"""Visual-Attention-Network/van-base""": (
"""https://huggingface.co/Visual-Attention-Network/van-base/blob/main/config.json"""
),
}
class lowerCAmelCase_ ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''van'''
def __init__( self : Union[str, Any] ,A_ : Dict=224 ,A_ : Tuple=3 ,A_ : Optional[int]=[7, 3, 3, 3] ,A_ : Optional[Any]=[4, 2, 2, 2] ,A_ : Dict=[64, 128, 320, 512] ,A_ : Tuple=[3, 3, 12, 3] ,A_ : Optional[Any]=[8, 8, 4, 4] ,A_ : Any="gelu" ,A_ : Any=0.02 ,A_ : List[Any]=1e-6 ,A_ : Optional[int]=1e-2 ,A_ : str=0.0 ,A_ : str=0.0 ,**A_ : List[Any] ,) -> List[Any]:
        super().__init__(**A_ )
A = image_size
A = num_channels
A = patch_sizes
A = strides
A = hidden_sizes
A = depths
A = mlp_ratios
A = hidden_act
A = initializer_range
A = layer_norm_eps
A = layer_scale_init_value
A = drop_path_rate
        A = dropout_rate
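# Hedged example (added): instantiating the config above with no arguments
# reproduces the VAN-base defaults declared in the signature:
#     config = lowerCAmelCase_()
#     config.hidden_sizes  # [64, 128, 320, 512]
#     config.depths        # [3, 3, 12, 3]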
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ ( PretrainedConfig ):
'''simple docstring'''
    model_type = '''yolos'''
def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class lowerCAmelCase_ ( OnnxConfig ):
'''simple docstring'''
    torch_onnx_minimum_version = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
        return 12
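# Note (added): the ONNX export config above declares a single rank-4
# ``pixel_values`` input with dynamic batch/channel/height/width axes, an
# absolute validation tolerance of 1e-4, and opset 12 for the export.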
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _check_json_dataset ( dataset , expected_features ):
    assert isinstance(dataset , Dataset )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _snake_case ( snake_case__ : Dict , snake_case__ : Optional[int] , snake_case__ : str ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def _snake_case ( snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = features.copy() if features else default_expected_features
A = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_3': 'float64', 'col_1': 'string', 'col_2': 'int64'},
] , )
def _snake_case ( snake_case__ : int , snake_case__ : str , snake_case__ : List[Any] ):
A = tmp_path / "cache"
A = {"col_3": "float64", "col_1": "string", "col_2": "int64"}
A = features.copy() if features else default_expected_features
A = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : str ):
A = {"col_2": "int64", "col_3": "float64", "col_1": "string"}
A = features.copy()
A = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = tmp_path / "cache"
A = JsonDatasetReader(_lowerCamelCase , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
assert isinstance(_lowerCamelCase , _lowerCamelCase )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def _snake_case ( snake_case__ : str , snake_case__ : Optional[int] , snake_case__ : Optional[int] ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase , split=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
assert dataset.split == split if split else "train"
@pytest.mark.parametrize('path_type' , [str, list] )
def _snake_case ( snake_case__ : Any , snake_case__ : Tuple , snake_case__ : List[Any] ):
if issubclass(_lowerCamelCase , _lowerCamelCase ):
A = jsonl_path
elif issubclass(_lowerCamelCase , _lowerCamelCase ):
A = [jsonl_path]
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_dataset(_lowerCamelCase , _lowerCamelCase )
def _check_json_datasetdict ( dataset_dict , expected_features , splits=("train",) ):
    assert isinstance(dataset_dict , DatasetDict )
for split in splits:
A = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize('keep_in_memory' , [False, True] )
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : int ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
A = JsonDatasetReader({'train': jsonl_path} , cache_dir=_lowerCamelCase , keep_in_memory=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize(
'features' , [
None,
{'col_1': 'string', 'col_2': 'int64', 'col_3': 'float64'},
{'col_1': 'string', 'col_2': 'string', 'col_3': 'string'},
{'col_1': 'int32', 'col_2': 'int32', 'col_3': 'int32'},
{'col_1': 'float32', 'col_2': 'float32', 'col_3': 'float32'},
] , )
def _snake_case ( snake_case__ : str , snake_case__ : Any , snake_case__ : Tuple ):
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = features.copy() if features else default_expected_features
A = (
Features({feature: Value(_lowerCamelCase ) for feature, dtype in features.items()} ) if features is not None else None
)
A = JsonDatasetReader({'train': jsonl_path} , features=_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase )
@pytest.mark.parametrize('split' , [None, NamedSplit('train' ), 'train', 'test'] )
def _snake_case ( snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : List[Any] ):
if split:
A = {split: jsonl_path}
else:
A = "train"
A = {"train": jsonl_path, "test": jsonl_path}
A = tmp_path / "cache"
A = {"col_1": "string", "col_2": "int64", "col_3": "float64"}
A = JsonDatasetReader(_lowerCamelCase , cache_dir=_lowerCamelCase ).read()
_check_json_datasetdict(_lowerCamelCase , _lowerCamelCase , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def load_json ( buffer ):
    return json.load(buffer )
def load_json_lines ( buffer ):
    return [json.loads(line ) for line in buffer]
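# Example (added): `load_json_lines` parses a JSON-lines buffer line by line,
# e.g. io.BytesIO(b'{"a": 1}\n{"a": 2}\n') -> [{'a': 1}, {'a': 2}].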
class lowerCAmelCase_ :
'''simple docstring'''
@pytest.mark.parametrize('lines, load_json_function' ,[(True, load_json_lines), (False, load_json)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : List[str] ,A_ : Optional[int] ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ ,A_ ,lines=A_ ).write()
buffer.seek(0 )
A = load_json_function(A_ )
assert isinstance(A_ ,A_ )
assert isinstance(exported_content[0] ,A_ )
assert len(A_ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' ,[
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ,A_ : Tuple ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : List[str] ) -> str:
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ ,A_ ,lines=A_ ,orient=A_ ).write()
buffer.seek(0 )
A = load_json(A_ )
assert isinstance(A_ ,A_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A_ ,'keys' ) and not hasattr(exported_content[0] ,'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(A_ ) == 10
@pytest.mark.parametrize('lines, load_json_function' ,[(True, load_json_lines), (False, load_json)] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : str ) -> Optional[Any]:
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ ,A_ ,lines=A_ ,num_proc=2 ).write()
buffer.seek(0 )
A = load_json_function(A_ )
assert isinstance(A_ ,A_ )
assert isinstance(exported_content[0] ,A_ )
assert len(A_ ) == 10
@pytest.mark.parametrize(
'orient, container, keys, len_at' ,[
('records', list, {'tokens', 'labels', 'answers', 'id'}, None),
('split', dict, {'columns', 'data'}, 'data'),
('index', dict, set('0123456789' ), None),
('columns', dict, {'tokens', 'labels', 'answers', 'id'}, 'tokens'),
('values', list, None, None),
('table', dict, {'schema', 'data'}, 'data'),
] ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ) -> Any:
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ ,A_ ,lines=A_ ,orient=A_ ,num_proc=2 ).write()
buffer.seek(0 )
A = load_json(A_ )
assert isinstance(A_ ,A_ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A_ ,'keys' ) and not hasattr(exported_content[0] ,'keys' )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(A_ ) == 10
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ) -> Optional[Any]:
with pytest.raises(A_ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(A_ ,A_ ,num_proc=0 )
@pytest.mark.parametrize('compression, extension' ,[('gzip', 'gz'), ('bz2', 'bz2'), ('xz', 'xz')] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ) -> Tuple:
A = tmp_path_factory.mktemp('data' ) / F'test.json.{extension}'
A = str(shared_datadir / F'test_file.json.{extension}' )
JsonDatasetWriter(A_ ,A_ ,compression=A_ ).write()
with fsspec.open(A_ ,'rb' ,compression='infer' ) as f:
A = f.read()
with fsspec.open(A_ ,'rb' ,compression='infer' ) as f:
A = f.read()
assert exported_content == original_content
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( OnnxPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    hub_checkpoint = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
"""simple docstring"""
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class FlaxRoFormerModelTester ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict ,A_ : int ,A_ : Tuple=13 ,A_ : str=7 ,A_ : str=True ,A_ : Optional[Any]=True ,A_ : List[Any]=True ,A_ : Optional[Any]=True ,A_ : List[Any]=99 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]=5 ,A_ : Optional[int]=4 ,A_ : Union[str, Any]=37 ,A_ : List[str]="gelu" ,A_ : Tuple=0.1 ,A_ : str=0.1 ,A_ : Optional[int]=512 ,A_ : Union[str, Any]=16 ,A_ : Dict=2 ,A_ : Any=0.02 ,A_ : List[Any]=4 ,) -> Optional[Any]:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_attention_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_choices
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
        input_ids = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
        attention_mask = None
        if self.use_attention_mask:
            attention_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
        config = RoFormerConfig(
            vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=False ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
        config_and_inputs = self.prepare_config_and_inputs()
        config , input_ids , token_type_ids , attention_mask = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': attention_mask}
return config, inputs_dict
@require_flax
class lowerCAmelCase_ ( FlaxModelTesterMixin , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = True
    all_model_classes = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
A = FlaxRoFormerModelTester(self )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
for model_class_name in self.all_model_classes:
            A = model_class_name.from_pretrained('junnyu/roformer_chinese_small' ,from_pt=True )
A = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = FlaxRoFormerForMaskedLM.from_pretrained('junnyu/roformer_chinese_base' )
A = jnp.array([[0, 1, 2, 3, 4, 5]] )
A = model(_a )[0]
A = 5_0000
A = (1, 6, vocab_size)
self.assertEqual(output.shape ,_a )
A = jnp.array(
[[[-0.12_05, -1.02_65, 0.29_22], [-1.51_34, 0.19_74, 0.15_19], [-5.01_35, -3.90_03, -0.84_04]]] )
        self.assertTrue(jnp.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( BaseImageProcessor ):
'''simple docstring'''
    model_input_names = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
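    # The method below turns raw segmentation logits into one label map per
    # image: if `target_sizes` is given, each logit map is first resized with
    # bilinear interpolation before taking the per-pixel argmax.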
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
"""simple docstring"""
from __future__ import annotations
def encode ( plain : str ):
    return [ord(elem ) - 96 for elem in plain]
def decode ( encoded : list[int] ):
    return "".join(chr(elem + 96 ) for elem in encoded )
def main ( ):
    A = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , A )
    print('Decoded:' , decode(A ) )
if __name__ == "__main__":
    main()
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def convert_transfo_xl_checkpoint_to_pytorch ( tf_checkpoint_path , transfo_xl_config_file , pytorch_dump_folder_path , transfo_xl_dataset_file ):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file , 'rb' ) as fp:
            corpus = pickle.load(fp , encoding='latin1' )
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
        print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict , pytorch_vocab_dump_path )
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop('vocab' , None )
        pytorch_dataset_dump_path = pytorch_dump_folder_path + '/' + CORPUS_NAME
        print(F'Save dataset to {pytorch_dataset_dump_path}' )
        torch.save(corpus_dict_no_vocab , pytorch_dataset_dump_path )
    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        config_path = os.path.abspath(transfo_xl_config_file )
        tf_path = os.path.abspath(tf_checkpoint_path )
        print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file )
        print(F'Building PyTorch model from configuration: {config}' )
        model = TransfoXLLMHeadModel(config )
        model = load_tf_weights_in_transfo_xl(model , config , tf_path )
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path , WEIGHTS_NAME )
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path , CONFIG_NAME )
        print(F'Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path )}' )
        torch.save(model.state_dict() , pytorch_weights_dump_path )
        print(F'Save configuration file to {os.path.abspath(pytorch_config_dump_path )}' )
        with open(pytorch_config_dump_path , 'w' , encoding='utf-8' ) as f:
            f.write(config.to_json_string() )
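# Hedged example invocation (added; the script filename is an assumption, the
# flags match the argparse definitions below):
#     python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#         --pytorch_dump_folder_path ./transfo-xl \
#         --transfo_xl_dataset_file ./pretrained_corpus.pkl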
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
)
"""simple docstring"""
import torch
from torch import nn
from torch.nn import CrossEntropyLoss, MSELoss
from transformers.file_utils import add_start_docstrings, add_start_docstrings_to_model_forward
from transformers.models.bert.modeling_bert import (
BERT_INPUTS_DOCSTRING,
BERT_START_DOCSTRING,
BertEmbeddings,
BertLayer,
BertPooler,
BertPreTrainedModel,
)
def entropy ( x : Optional[int] ):
    exp_x = torch.exp(x )
    A = torch.sum(exp_x , dim=1 )  # sum of exp(x_i)
    B = torch.sum(x * exp_x , dim=1 )  # sum of x_i * exp(x_i)
    return torch.log(A ) - B / A
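# Note (added): for a batch of logit rows x this is the Shannon entropy of
# softmax(x): with S = sum_i exp(x_i), -sum_i p_i*log(p_i) = log(S) - (sum_i x_i*exp(x_i)) / S.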
class DeeBertEncoder ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Dict ) -> List[Any]:
super().__init__()
A = config.output_attentions
A = config.output_hidden_states
A = nn.ModuleList([BertLayer(__lowerCAmelCase ) for _ in range(config.num_hidden_layers )] )
A = nn.ModuleList([BertHighway(__lowerCAmelCase ) for _ in range(config.num_hidden_layers )] )
A = [-1 for _ in range(config.num_hidden_layers )]
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> Optional[Any]:
if (type(__lowerCAmelCase ) is float) or (type(__lowerCAmelCase ) is int):
for i in range(len(self.early_exit_entropy ) ):
A = x
else:
A = x
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[str] ) -> Union[str, Any]:
A = pooler.state_dict()
for highway in self.highway:
for name, param in highway.pooler.state_dict().items():
param.copy_(loaded_model[name] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : List[Any]=None ,A_ : List[Any]=None ,A_ : Dict=None ,A_ : List[str]=None ,) -> Tuple:
A = ()
A = ()
A = ()
for i, layer_module in enumerate(self.layer ):
if self.output_hidden_states:
A = all_hidden_states + (hidden_states,)
A = layer_module(
__lowerCAmelCase ,__lowerCAmelCase ,head_mask[i] ,__lowerCAmelCase ,__lowerCAmelCase )
A = layer_outputs[0]
if self.output_attentions:
A = all_attentions + (layer_outputs[1],)
A = (hidden_states,)
if self.output_hidden_states:
A = current_outputs + (all_hidden_states,)
if self.output_attentions:
A = current_outputs + (all_attentions,)
A = self.highway[i](__lowerCAmelCase )
# logits, pooled_output
if not self.training:
A = highway_exit[0]
A = entropy(__lowerCAmelCase )
A = highway_exit + (highway_entropy,) # logits, hidden_states(?), entropy
A = all_highway_exits + (highway_exit,)
if highway_entropy < self.early_exit_entropy[i]:
A = (highway_logits,) + current_outputs[1:] + (all_highway_exits,)
raise HighwayException(__lowerCAmelCase ,i + 1 )
else:
A = all_highway_exits + (highway_exit,)
# Add last layer
if self.output_hidden_states:
A = all_hidden_states + (hidden_states,)
A = (hidden_states,)
if self.output_hidden_states:
A = outputs + (all_hidden_states,)
if self.output_attentions:
A = outputs + (all_attentions,)
A = outputs + (all_highway_exits,)
return outputs # last-layer hidden state, (all hidden states), (all attentions), all highway exits
@add_start_docstrings(
    '''The Bert Model transformer with early exiting (DeeBERT). ''' , BERT_START_DOCSTRING , )
class DeeBertModel ( BertPreTrainedModel ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Any ) -> Tuple:
super().__init__(__lowerCAmelCase )
A = config
A = BertEmbeddings(__lowerCAmelCase )
A = DeeBertEncoder(__lowerCAmelCase )
A = BertPooler(__lowerCAmelCase )
self.init_weights()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
self.encoder.init_highway_pooler(self.pooler )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
return self.embeddings.word_embeddings
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Tuple ) -> Tuple:
A = value
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ) -> List[str]:
for layer, heads in heads_to_prune.items():
self.encoder.layer[layer].attention.prune_heads(__lowerCAmelCase )
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int]=None ,A_ : str=None ,A_ : Dict=None ,A_ : str=None ,A_ : Optional[int]=None ,A_ : int=None ,A_ : Optional[Any]=None ,A_ : Dict=None ,) -> Dict:
if input_ids is not None and inputs_embeds is not None:
raise ValueError('You cannot specify both input_ids and inputs_embeds at the same time' )
elif input_ids is not None:
A = input_ids.size()
elif inputs_embeds is not None:
A = inputs_embeds.size()[:-1]
else:
raise ValueError('You have to specify either input_ids or inputs_embeds' )
A = input_ids.device if input_ids is not None else inputs_embeds.device
if attention_mask is None:
A = torch.ones(__lowerCAmelCase ,device=__lowerCAmelCase )
if encoder_attention_mask is None:
A = torch.ones(__lowerCAmelCase ,device=__lowerCAmelCase )
if token_type_ids is None:
A = torch.zeros(__lowerCAmelCase ,dtype=torch.long ,device=__lowerCAmelCase )
# We can provide a self-attention mask of dimensions [batch_size, from_seq_length, to_seq_length]
# ourselves in which case we just need to make it broadcastable to all heads.
A = self.get_extended_attention_mask(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# If a 2D ou 3D attention mask is provided for the cross-attention
# we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
if encoder_attention_mask.dim() == 3:
A = encoder_attention_mask[:, None, :, :]
if encoder_attention_mask.dim() == 2:
A = encoder_attention_mask[:, None, None, :]
A = encoder_extended_attention_mask.to(
dtype=next(self.parameters() ).dtype ) # fp16 compatibility
A = (1.0 - encoder_extended_attention_mask) * -1_0000.0
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
A = self.get_head_mask(__lowerCAmelCase ,self.config.num_hidden_layers )
A = self.embeddings(
input_ids=__lowerCAmelCase ,position_ids=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,inputs_embeds=__lowerCAmelCase )
A = self.encoder(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,head_mask=__lowerCAmelCase ,encoder_hidden_states=__lowerCAmelCase ,encoder_attention_mask=__lowerCAmelCase ,)
A = encoder_outputs[0]
A = self.pooler(__lowerCAmelCase )
A = (
sequence_output,
pooled_output,
) + encoder_outputs[
1:
] # add hidden_states and attentions if they are here
return outputs # sequence_output, pooled_output, (hidden_states), (attentions), highway exits
class HighwayException ( Exception ):
'''simple docstring'''
def __init__( self : Dict ,A_ : List[Any] ,A_ : Dict ) -> List[Any]:
A = message
A = exit_layer # start from 1!
class BertHighway ( nn.Module ):
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[int] ) -> str:
super().__init__()
A = BertPooler(__lowerCAmelCase )
A = nn.Dropout(config.hidden_dropout_prob )
A = nn.Linear(config.hidden_size ,config.num_labels )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = encoder_outputs[0]
A = self.pooler(__lowerCAmelCase )
# "return" pooler_output
# BertModel
A = (pooler_input, pooler_output) + encoder_outputs[1:]
# "return" bmodel_output
# Dropout and classification
A = bmodel_output[1]
A = self.dropout(__lowerCAmelCase )
A = self.classifier(__lowerCAmelCase )
return logits, pooled_output
@add_start_docstrings(
'''Bert Model (with early exiting - DeeBERT) with a classifier on top,
    also takes care of multi-layer training. ''' , BERT_START_DOCSTRING , )
class DeeBertForSequenceClassification ( BertPreTrainedModel ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : Any ) -> List[Any]:
super().__init__(__lowerCAmelCase )
A = config.num_labels
A = config.num_hidden_layers
A = DeeBertModel(__lowerCAmelCase )
A = nn.Dropout(config.hidden_dropout_prob )
A = nn.Linear(config.hidden_size ,self.config.num_labels )
self.init_weights()
    @add_start_docstrings_to_model_forward(BERT_INPUTS_DOCSTRING )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Optional[int]=None ,A_ : Any=None ,A_ : str=None ,A_ : str=None ,A_ : Any=None ,A_ : int=None ,A_ : Any=None ,A_ : List[Any]=-1 ,A_ : Union[str, Any]=False ,) -> int:
A = self.num_layers
try:
A = self.bert(
__lowerCAmelCase ,attention_mask=__lowerCAmelCase ,token_type_ids=__lowerCAmelCase ,position_ids=__lowerCAmelCase ,head_mask=__lowerCAmelCase ,inputs_embeds=__lowerCAmelCase ,)
# sequence_output, pooled_output, (hidden_states), (attentions), highway exits
A = outputs[1]
A = self.dropout(__lowerCAmelCase )
A = self.classifier(__lowerCAmelCase )
A = (logits,) + outputs[2:] # add hidden states and attention if they are here
except HighwayException as e:
A = e.message
A = e.exit_layer
A = outputs[0]
if not self.training:
A = entropy(__lowerCAmelCase )
A = []
A = []
if labels is not None:
if self.num_labels == 1:
# We are doing regression
A = MSELoss()
A = loss_fct(logits.view(-1 ) ,labels.view(-1 ) )
else:
A = CrossEntropyLoss()
A = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
# work with highway exits
A = []
for highway_exit in outputs[-1]:
A = highway_exit[0]
if not self.training:
highway_logits_all.append(__lowerCAmelCase )
highway_entropy.append(highway_exit[2] )
if self.num_labels == 1:
# We are doing regression
A = MSELoss()
A = loss_fct(highway_logits.view(-1 ) ,labels.view(-1 ) )
else:
A = CrossEntropyLoss()
A = loss_fct(highway_logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
highway_losses.append(__lowerCAmelCase )
if train_highway:
A = (sum(highway_losses[:-1] ),) + outputs
# exclude the final highway, of course
else:
A = (loss,) + outputs
if not self.training:
A = outputs + ((original_entropy, highway_entropy), exit_layer)
if output_layer >= 0:
A = (
(outputs[0],) + (highway_logits_all[output_layer],) + outputs[2:]
) # use the highway of the last layer
return outputs # (loss), logits, (hidden_states), (attentions), (highway_exits)
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
    def add_pair ( self ,u ,v ,w=1 ) -> None:
        if self.graph.get(u ):
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v ):
            self.graph[v] = []
    def all_nodes ( self ) -> list:
        return list(self.graph )
    def remove_pair ( self ,u ,v ) -> None:
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
    def dfs ( self ,s=-2 ,d=-1 ) -> list:
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(d )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph_randomly ( self ,c=-1 ) -> None:
        if c == -1:
            c = floor(random() * 1_0000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i ,n ,1 )
    def bfs ( self ,s=-2 ) -> list:
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def in_degree ( self ,u ) -> int:
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count
    def out_degree ( self ,u ) -> int:
        return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
    def dfs_time ( self ,s=-2 ,e=-1 ) -> float:
        begin = time()
        self.dfs(s ,e )
        end = time()
        return end - begin
    def bfs_time ( self ,s=-2 ) -> float:
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
class Graph :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
    def add_pair ( self ,u ,v ,w=1 ) -> None:
        # check if u exists
        if self.graph.get(u ):
            # if there is already an edge
            if self.graph[u].count([w, v] ) == 0:
                self.graph[u].append([w, v] )
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v ):
            # if there is already an edge
            if self.graph[v].count([w, u] ) == 0:
                self.graph[v].append([w, u] )
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]
    def remove_pair ( self ,u ,v ) -> None:
        if self.graph.get(u ):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_ )
        # the other way round
        if self.graph.get(v ):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_ )
    def dfs ( self ,s=-2 ,d=-1 ) -> list:
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        stack.append(s )
        visited.append(s )
        ss = s
        while True:
            # check if there are any non-isolated nodes
            if len(self.graph[s] ) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        if node[1] == d:
                            visited.append(d )
                            return visited
                        else:
                            stack.append(node[1] )
                            visited.append(node[1] )
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack ) != 0:
                    s = stack[len(stack ) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack ) == 0:
                return visited
    def fill_graph_randomly ( self ,c=-1 ) -> None:
        if c == -1:
            c = floor(random() * 1_0000 ) + 10
        for i in range(c ):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102 ) + 1 ):
                n = floor(random() * c ) + 1
                if n != i:
                    self.add_pair(i ,n ,1 )
    def bfs ( self ,s=-2 ) -> list:
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph )[0]
        d.append(s )
        visited.append(s )
        while d:
            s = d.popleft()
            if len(self.graph[s] ) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1] ) < 1:
                        d.append(node[1] )
                        visited.append(node[1] )
        return visited
    def degree ( self ,u ) -> int:
        return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
    def all_nodes ( self ) -> list:
        return list(self.graph )
    def dfs_time ( self ,s=-2 ,e=-1 ) -> float:
        begin = time()
        self.dfs(s ,e )
        end = time()
        return end - begin
    def bfs_time ( self ,s=-2 ) -> float:
        begin = time()
        self.bfs(s )
        end = time()
        return end - begin
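# Hedged usage sketch (added): exercising the directed graph class above with a
# three-node cycle; outputs assume dict insertion order (Python 3.7+).
#     g = DirectedGraph()
#     g.add_pair(0, 1); g.add_pair(1, 2); g.add_pair(2, 0)
#     g.dfs()  # [0, 1, 2] - depth-first visit order from the first node
#     g.bfs()  # [0, 1, 2] - breadth-first visit order from the first node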
"""simple docstring"""
import random
def random_graph ( vertices_number : int , probability : float , directed : bool = False ):
    graph = {i: [] for i in range(vertices_number )}
    # if probability is greater than or equal to 1, generate a complete graph
    if probability >= 1:
        return complete_graph(vertices_number )
    # if probability is lower than or equal to 0, return a graph without edges
    if probability <= 0:
        return graph
    # for each pair of nodes i < j, add an edge from i to j when the randomly
    # generated number is lower than the given probability
    for i in range(vertices_number ):
        for j in range(i + 1 , vertices_number ):
            if random.random() < probability:
                graph[i].append(j )
                if not directed:
                    # if the graph is undirected, also add an edge from j to i
                    graph[j].append(i )
    return graph
def complete_graph ( vertices_number : int ):
    return {
        i: [j for j in range(vertices_number ) if i != j] for i in range(vertices_number )
    }
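# Example (added): complete_graph(3) == {0: [1, 2], 1: [0, 2], 2: [0, 1]}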
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
A = F'{olid} is not a valid Open Library olid'
raise ValueError(snake_case__ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 0 |
"""simple docstring"""
from maths.prime_check import is_prime
def _snake_case ( snake_case__ : int ):
if not isinstance(_lowercase , _lowercase ):
A = F'Input value of [number={number}] must be an integer'
raise TypeError(_lowercase )
if is_prime(_lowercase ) and is_prime(number + 2 ):
return number + 2
else:
return -1
if __name__ == "__main__":
import doctest
doctest.testmod() | 708 |
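
# A self-contained sketch of the twin-prime rule implemented above; the
# original delegates to maths.prime_check.is_prime, so a local trial
# division stand-in is used here:
def _is_prime(n: int) -> bool:
    if n < 2:
        return False
    return all(n % d for d in range(2, int(n**0.5) + 1))


def twin_prime(number: int) -> int:
    return number + 2 if _is_prime(number) and _is_prime(number + 2) else -1


assert twin_prime(3) == 5 and twin_prime(5) == 7 and twin_prime(7) == -1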
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 0 |
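
# The import table above follows the Hugging Face lazy-module pattern:
# heavy submodules are only imported when one of their attributes is first
# accessed. A minimal stand-alone sketch of the same idea via PEP 562
# module-level __getattr__ (the attribute table below is illustrative):
import importlib

_LAZY_ATTRS = {"sqrt": "math", "dumps": "json"}  # attribute -> providing module


def __getattr__(name):  # called only for attributes not found normally
    if name in _LAZY_ATTRS:
        return getattr(importlib.import_module(_LAZY_ATTRS[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")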
import inspect
import unittest
from transformers import ConvNextVaConfig
from transformers.models.auto import get_values
from transformers.models.auto.modeling_auto import MODEL_FOR_BACKBONE_MAPPING_NAMES, MODEL_MAPPING_NAMES
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import ConvNextVaBackbone, ConvNextVaForImageClassification, ConvNextVaModel
from transformers.models.convnextva.modeling_convnextva import CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int]=13 ,A_ : str=32 ,A_ : str=3 ,A_ : int=4 ,A_ : List[str]=[10, 20, 30, 40] ,A_ : Any=[2, 2, 3, 2] ,A_ : Any=True ,A_ : int=True ,A_ : str=37 ,A_ : List[Any]="gelu" ,A_ : int=10 ,A_ : str=0.02 ,A_ : int=["stage2", "stage3", "stage4"] ,A_ : List[str]=[2, 3, 4] ,A_ : str=None ,) -> Optional[Any]:
A = parent
A = batch_size
A = image_size
A = num_channels
A = num_stages
A = hidden_sizes
A = depths
A = is_training
A = use_labels
A = intermediate_size
A = hidden_act
A = num_labels
A = initializer_range
A = out_features
A = out_indices
A = scope
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.num_labels )
A = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
return ConvNextVaConfig(
num_channels=self.num_channels ,hidden_sizes=self.hidden_sizes ,depths=self.depths ,num_stages=self.num_stages ,hidden_act=self.hidden_act ,is_decoder=__lowerCAmelCase ,initializer_range=self.initializer_range ,out_features=self.out_features ,out_indices=self.out_indices ,num_labels=self.num_labels ,)
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str] ,A_ : Optional[Any] ,A_ : Dict ) -> Optional[int]:
A = ConvNextVaModel(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A = model(__lowerCAmelCase )
# expected last hidden states: B, C, H // 32, W // 32
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], self.image_size // 32, self.image_size // 32) ,)
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ) -> Dict:
A = ConvNextVaForImageClassification(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A = model(__lowerCAmelCase ,labels=__lowerCAmelCase )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : Optional[int] ) -> int:
A = ConvNextVaBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A = model(__lowerCAmelCase )
        # verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[1], 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,len(config.out_features ) )
self.parent.assertListEqual(model.channels ,config.hidden_sizes[1:] )
# verify backbone works with out_features=None
A = None
A = ConvNextVaBackbone(config=__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
A = model(__lowerCAmelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) ,1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) ,[self.batch_size, self.hidden_sizes[-1], 1, 1] )
# verify channels
self.parent.assertEqual(len(model.channels ) ,1 )
self.parent.assertListEqual(model.channels ,[config.hidden_sizes[-1]] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values, 'labels': labels}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( UpperCAmelCase__ , UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = (
(
ConvNextVaModel,
ConvNextVaForImageClassification,
ConvNextVaBackbone,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Optional[int] = (
{'''feature-extraction''': ConvNextVaModel, '''image-classification''': ConvNextVaForImageClassification}
if is_torch_available()
else {}
)
_lowerCamelCase: Tuple = False
_lowerCamelCase: int = False
_lowerCamelCase: List[Any] = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: Any = False
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = ConvNextVaModelTester(self )
A = ConfigTester(self ,config_class=__lowerCAmelCase ,has_text_modality=__lowerCAmelCase ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
return
@unittest.skip(reason='ConvNextV2 does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
pass
@unittest.skip(reason='ConvNextV2 does not support input and output embeddings' )
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
pass
@unittest.skip(reason='ConvNextV2 does not use feedforward chunking' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
pass
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
A , A = self.model_tester.prepare_config_and_inputs_with_labels()
A = True
if model_class.__name__ in [
*get_values(__lowerCAmelCase ),
*get_values(__lowerCAmelCase ),
]:
continue
A = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.train()
A = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase )
A = model(**__lowerCAmelCase ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
if not self.model_tester.is_training:
return
for model_class in self.all_model_classes:
A , A = self.model_tester.prepare_config_and_inputs_with_labels()
A = False
A = True
if (
model_class.__name__
in [*get_values(__lowerCAmelCase ), *get_values(__lowerCAmelCase )]
or not model_class.supports_gradient_checkpointing
):
continue
A = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.gradient_checkpointing_enable()
model.train()
A = self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ,return_labels=__lowerCAmelCase )
A = model(**__lowerCAmelCase ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__lowerCAmelCase )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[Any]:
def check_hidden_states_output(A_ : Union[str, Any] ,A_ : Dict ,A_ : Tuple ):
A = model_class(__lowerCAmelCase )
model.to(__lowerCAmelCase )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(__lowerCAmelCase ,__lowerCAmelCase ) )
A = outputs.encoder_hidden_states if config.is_encoder_decoder else outputs.hidden_states
A = self.model_tester.num_stages
self.assertEqual(len(__lowerCAmelCase ) ,expected_num_stages + 1 )
# ConvNextV2's feature maps are of shape (batch_size, num_channels, height, width)
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) ,[self.model_tester.image_size // 4, self.model_tester.image_size // 4] ,)
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(__lowerCAmelCase ,__lowerCAmelCase ,__lowerCAmelCase )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__lowerCAmelCase )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
for model_name in CONVNEXTV2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = ConvNextVaModel.from_pretrained(__lowerCAmelCase )
self.assertIsNotNone(__lowerCAmelCase )
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return AutoImageProcessor.from_pretrained('facebook/convnextv2-tiny-1k-224' ) if is_vision_available() else None
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
A = ConvNextVaForImageClassification.from_pretrained('facebook/convnextv2-tiny-1k-224' ).to(__lowerCAmelCase )
A = self.default_image_processor
A = prepare_img()
A = preprocessor(images=__lowerCAmelCase ,return_tensors='pt' ).to(__lowerCAmelCase )
# forward pass
with torch.no_grad():
A = model(**__lowerCAmelCase )
# verify the logits
A = torch.Size((1, 1000) )
self.assertEqual(outputs.logits.shape ,__lowerCAmelCase )
A = torch.tensor([0.99_96, 0.19_66, -0.43_86] ).to(__lowerCAmelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] ,__lowerCAmelCase ,atol=1e-4 ) ) | 709 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
A = swinva_name.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
        A = {int(k ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
        A = {int(k ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def _snake_case ( snake_case__ : List[Any] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[
dim : dim * 2
]
A = val[-dim:]
else:
A = val
return orig_state_dict
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
A = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A = get_swinva_config(snake_case__ )
A = SwinvaForImageClassification(snake_case__ )
model.eval()
A = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A = image_processor(images=snake_case__ , return_tensors='pt' )
A = timm_model(inputs['pixel_values'] )
A = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path) | 22 | 0 |
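
# Example invocation of the conversion script above (the file name is
# whatever this module is saved as; the checkpoint name is the script's
# own default, and any Swin-v2 model known to timm should work):
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-converted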
"""simple docstring"""
import os
import re
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''vocab_file''': '''vocab.txt''',
'''merges_file''': '''bpe.codes''',
}
_lowercase = {
'''vocab_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/vocab.txt''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/vocab.txt''',
},
'''merges_file''': {
'''vinai/phobert-base''': '''https://huggingface.co/vinai/phobert-base/resolve/main/bpe.codes''',
'''vinai/phobert-large''': '''https://huggingface.co/vinai/phobert-large/resolve/main/bpe.codes''',
},
}
_lowercase = {
'''vinai/phobert-base''': 2_56,
'''vinai/phobert-large''': 2_56,
}
def _snake_case ( snake_case__ : Any ):
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
A = set(_snake_case )
return pairs
class lowerCAmelCase_ ( __UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = VOCAB_FILES_NAMES
_lowerCamelCase: Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: Optional[int] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : Optional[Any] ,A_ : Dict ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[int]="</s>" ,A_ : Union[str, Any]="</s>" ,A_ : Any="<s>" ,A_ : Optional[int]="<unk>" ,A_ : Dict="<pad>" ,A_ : str="<mask>" ,**A_ : List[Any] ,) -> Optional[Any]:
super().__init__(
bos_token=UpperCAmelCase_ ,eos_token=UpperCAmelCase_ ,unk_token=UpperCAmelCase_ ,sep_token=UpperCAmelCase_ ,cls_token=UpperCAmelCase_ ,pad_token=UpperCAmelCase_ ,mask_token=UpperCAmelCase_ ,**UpperCAmelCase_ ,)
A = vocab_file
A = merges_file
A = {}
A = 0
A = 1
A = 2
A = 3
self.add_from_file(UpperCAmelCase_ )
A = {v: k for k, v in self.encoder.items()}
with open(UpperCAmelCase_ ,encoding='utf-8' ) as merges_handle:
A = merges_handle.read().split('\n' )[:-1]
A = [tuple(merge.split()[:-1] ) for merge in merges]
A = dict(zip(UpperCAmelCase_ ,range(len(UpperCAmelCase_ ) ) ) )
A = {}
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Tuple ,A_ : Optional[Any] = None ) -> str:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A = [self.cls_token_id]
A = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : int ,A_ : Optional[Any] = None ,A_ : List[Any] = False ) -> Optional[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=UpperCAmelCase_ ,token_ids_a=UpperCAmelCase_ ,already_has_special_tokens=UpperCAmelCase_ )
if token_ids_a is None:
return [1] + ([0] * len(UpperCAmelCase_ )) + [1]
return [1] + ([0] * len(UpperCAmelCase_ )) + [1, 1] + ([0] * len(UpperCAmelCase_ )) + [1]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : int ,A_ : int = None ) -> int:
A = [self.sep_token_id]
A = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
return len(self.encoder )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
return dict(self.encoder ,**self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : int ) -> Optional[int]:
if token in self.cache:
return self.cache[token]
A = tuple(UpperCAmelCase_ )
A = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
A = get_pairs(UpperCAmelCase_ )
if not pairs:
return token
while True:
A = min(UpperCAmelCase_ ,key=lambda A_ : self.bpe_ranks.get(UpperCAmelCase_ ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(UpperCAmelCase_ ):
try:
A = word.index(UpperCAmelCase_ ,UpperCAmelCase_ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(UpperCAmelCase_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(UpperCAmelCase_ )
A = new_word
if len(UpperCAmelCase_ ) == 1:
break
else:
A = get_pairs(UpperCAmelCase_ )
A = '@@ '.join(UpperCAmelCase_ )
A = word[:-4]
A = word
return word
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ) -> Dict:
A = []
A = re.findall(R'\S+\n?' ,UpperCAmelCase_ )
for token in words:
split_tokens.extend(list(self.bpe(UpperCAmelCase_ ).split(' ' ) ) )
return split_tokens
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> Optional[int]:
return self.encoder.get(UpperCAmelCase_ ,self.encoder.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Any ) -> Tuple:
return self.decoder.get(UpperCAmelCase_ ,self.unk_token )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[int] ) -> Tuple:
A = ' '.join(UpperCAmelCase_ ).replace('@@ ' ,'' ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ,A_ : str = None ) -> Union[str, Any]:
if not os.path.isdir(UpperCAmelCase_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
UpperCAmelCase_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
UpperCAmelCase_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.vocab_file ,UpperCAmelCase_ )
if os.path.abspath(self.merges_file ) != os.path.abspath(UpperCAmelCase_ ):
copyfile(self.merges_file ,UpperCAmelCase_ )
return out_vocab_file, out_merge_file
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[str] ) -> Union[str, Any]:
if isinstance(UpperCAmelCase_ ,UpperCAmelCase_ ):
try:
with open(UpperCAmelCase_ ,'r' ,encoding='utf-8' ) as fd:
self.add_from_file(UpperCAmelCase_ )
except FileNotFoundError as fnfe:
raise fnfe
except UnicodeError:
raise Exception(F'Incorrect encoding detected in {f}, please rebuild the dataset' )
return
A = f.readlines()
for lineTmp in lines:
A = lineTmp.strip()
A = line.rfind(' ' )
if idx == -1:
raise ValueError('Incorrect dictionary format, expected \'<token> <cnt>\'' )
A = line[:idx]
A = len(self.encoder ) | 710 |
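
# The bpe() method above performs greedy lowest-rank merging: repeatedly
# pick the adjacent symbol pair with the best (lowest) merge rank and fuse
# it. A minimal sketch of a single merge pass (the toy rank table in the
# assert is invented for illustration):
def bpe_merge_once(word: tuple, ranks: dict) -> tuple:
    pairs = {(word[i], word[i + 1]) for i in range(len(word) - 1)}
    best = min(pairs, key=lambda p: ranks.get(p, float("inf")), default=None)
    if best is None or best not in ranks:
        return word  # nothing left to merge
    merged, i = [], 0
    while i < len(word):
        if i < len(word) - 1 and (word[i], word[i + 1]) == best:
            merged.append(word[i] + word[i + 1])
            i += 2
        else:
            merged.append(word[i])
            i += 1
    return tuple(merged)


assert bpe_merge_once(("l", "o", "w"), {("l", "o"): 0}) == ("lo", "w")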
"""simple docstring"""
from math import pi, sqrt
def _snake_case ( snake_case__ : float ):
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
elif num - int(snake_case__ ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _snake_case ( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 | 0 |
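
# The recursion above implements Gamma(n) = (n - 1) * Gamma(n - 1) with the
# base cases Gamma(1) = 1 and Gamma(0.5) = sqrt(pi). A compact reference
# version for comparison (Gamma(5) = 4! = 24):
from math import pi, sqrt


def gamma_ref(num: float) -> float:
    if num == 0.5:
        return sqrt(pi)
    return 1.0 if num == 1 else (num - 1) * gamma_ref(num - 1)


assert gamma_ref(5) == 24.0
assert gamma_ref(1.5) == 0.5 * sqrt(pi)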
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'facebook/xmod-base': 'https://huggingface.co/facebook/xmod-base/resolve/main/config.json',
'facebook/xmod-large-prenorm': 'https://huggingface.co/facebook/xmod-large-prenorm/resolve/main/config.json',
'facebook/xmod-base-13-125k': 'https://huggingface.co/facebook/xmod-base-13-125k/resolve/main/config.json',
'facebook/xmod-base-30-125k': 'https://huggingface.co/facebook/xmod-base-30-125k/resolve/main/config.json',
'facebook/xmod-base-30-195k': 'https://huggingface.co/facebook/xmod-base-30-195k/resolve/main/config.json',
'facebook/xmod-base-60-125k': 'https://huggingface.co/facebook/xmod-base-60-125k/resolve/main/config.json',
'facebook/xmod-base-60-265k': 'https://huggingface.co/facebook/xmod-base-60-265k/resolve/main/config.json',
'facebook/xmod-base-75-125k': 'https://huggingface.co/facebook/xmod-base-75-125k/resolve/main/config.json',
'facebook/xmod-base-75-269k': 'https://huggingface.co/facebook/xmod-base-75-269k/resolve/main/config.json',
}
class lowerCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
_lowerCamelCase: Dict = '''xmod'''
def __init__( self : Optional[Any] ,A_ : List[Any]=3_0522 ,A_ : Dict=768 ,A_ : Tuple=12 ,A_ : List[Any]=12 ,A_ : Union[str, Any]=3072 ,A_ : Union[str, Any]="gelu" ,A_ : List[str]=0.1 ,A_ : List[str]=0.1 ,A_ : List[str]=512 ,A_ : Tuple=2 ,A_ : List[Any]=0.02 ,A_ : List[Any]=1e-12 ,A_ : Optional[int]=1 ,A_ : Tuple=0 ,A_ : Any=2 ,A_ : int="absolute" ,A_ : List[str]=True ,A_ : List[Any]=None ,A_ : Any=False ,A_ : Dict=2 ,A_ : Dict=False ,A_ : Dict=True ,A_ : Optional[int]=True ,A_ : Optional[int]=("en_XX",) ,A_ : Tuple=None ,**A_ : List[Any] ,) -> Optional[int]:
super().__init__(pad_token_id=UpperCamelCase__ ,bos_token_id=UpperCamelCase__ ,eos_token_id=UpperCamelCase__ ,**UpperCamelCase__ )
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_act
A = intermediate_size
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = initializer_range
A = layer_norm_eps
A = position_embedding_type
A = use_cache
A = classifier_dropout
A = pre_norm
A = adapter_reduction_factor
A = adapter_layer_norm
A = adapter_reuse_layer_norm
A = ln_before_adapter
A = list(UpperCamelCase__ )
A = default_language
class lowerCAmelCase_ ( UpperCamelCase_ ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
if self.task == "multiple-choice":
A = {0: '''batch''', 1: '''choice''', 2: '''sequence'''}
else:
A = {0: '''batch''', 1: '''sequence'''}
return OrderedDict(
[
('input_ids', dynamic_axis),
('attention_mask', dynamic_axis),
] ) | 711 |
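
# A usage sketch for the configuration above (this assumes a transformers
# release that ships X-MOD; the language codes are illustrative):
#
#   from transformers import XmodConfig, XmodModel
#
#   config = XmodConfig(languages=("en_XX", "de_DE"), default_language="en_XX")
#   model = XmodModel(config)  # one adapter module per entry in `languages`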
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean | 22 | 0 |
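
# DiagonalGaussianDistribution.sample() above uses the reparameterization
# trick: x = mean + std * eps with eps ~ N(0, I), so gradients flow through
# mean and logvar. A minimal stand-alone illustration:
import torch

mean = torch.zeros(2, 4, requires_grad=True)
logvar = torch.zeros(2, 4, requires_grad=True)
std = torch.exp(0.5 * logvar)
sample = mean + std * torch.randn_like(std)  # differentiable sample
sample.sum().backward()
assert mean.grad is not None and logvar.grad is not None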
"""simple docstring"""
from ...utils import logging
from ..ta.modeling_tf_ta import TFTaEncoderModel, TFTaForConditionalGeneration, TFTaModel
from .configuration_mta import MTaConfig
_lowercase = logging.get_logger(__name__)
_lowercase = '''T5Config'''
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''mt5'''
_lowerCamelCase: Optional[Any] = MTaConfig
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = '''mt5'''
_lowerCamelCase: Optional[Any] = MTaConfig
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''mt5'''
_lowerCamelCase: List[Any] = MTaConfig | 712 |
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
for i in range(snake_case__ ):
A = y_points[i]
for i in range(2 , snake_case__ ):
for j in range(snake_case__ , snake_case__ ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 0 |
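
# The routine above is Neville's iterated interpolation: each q[j][i]
# combines two lower-order estimates, and q[n - 1][n - 1] is the value of
# the unique degree-(n - 1) polynomial through the points. An equivalent
# zero-based formulation; four samples of y = x**2 determine the quadratic
# exactly, so evaluating at x = 5 recovers 25:
def neville(x_points: list, y_points: list, xa: float) -> float:
    n = len(x_points)
    q = [[0.0] * n for _ in range(n)]
    for i in range(n):
        q[i][0] = y_points[i]
    for i in range(1, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i])
    return q[n - 1][n - 1]


assert neville([1, 2, 3, 4], [1, 4, 9, 16], 5) == 25.0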
import json
import os
import unittest
from transformers import MgpstrTokenizer
from transformers.models.mgp_str.tokenization_mgp_str import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class lowerCAmelCase_ ( UpperCAmelCase__ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = MgpstrTokenizer
_lowerCamelCase: Dict = False
_lowerCamelCase: Optional[int] = {}
_lowerCamelCase: Optional[int] = False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
super().setUp()
# fmt: off
A = ["""[GO]""", """[s]""", """0""", """1""", """2""", """3""", """4""", """5""", """6""", """7""", """8""", """9""", """a""", """b""", """c""", """d""", """e""", """f""", """g""", """h""", """i""", """j""", """k""", """l""", """m""", """n""", """o""", """p""", """q""", """r""", """s""", """t""", """u""", """v""", """w""", """x""", """y""", """z"""]
# fmt: on
A = dict(zip(A_ ,range(len(A_ ) ) ) )
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(A_ ) + '\n' )
def _SCREAMING_SNAKE_CASE ( self : Any ,**A_ : int ) -> Optional[int]:
return MgpstrTokenizer.from_pretrained(self.tmpdirname ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[str] ) -> Optional[int]:
A = """tester"""
A = """tester"""
return input_text, output_text
@unittest.skip('MGP-STR always lower cases letters.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> str:
pass
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
A = self.get_tokenizers(do_lower_case=A_ )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A = """[SPECIAL_TOKEN]"""
tokenizer.add_special_tokens({'cls_token': special_token} )
A = tokenizer.encode([special_token] ,add_special_tokens=A_ )
self.assertEqual(len(A_ ) ,1 )
A = tokenizer.decode(A_ ,skip_special_tokens=A_ )
self.assertTrue(special_token not in decoded )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
A = self.get_input_output_texts(A_ )
A = tokenizer.tokenize(A_ )
A = tokenizer.convert_tokens_to_ids(A_ )
A = tokenizer.encode(A_ ,add_special_tokens=A_ )
self.assertListEqual(A_ ,A_ )
A = tokenizer.convert_ids_to_tokens(A_ )
self.assertNotEqual(len(A_ ) ,0 )
A = tokenizer.decode(A_ )
self.assertIsInstance(A_ ,A_ )
self.assertEqual(text_a.replace(' ' ,'' ) ,A_ )
@unittest.skip('MGP-STR tokenizer only handles one sequence.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Union[str, Any]:
pass
@unittest.skip('inputs cannot be pretokenized in MgpstrTokenizer' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
pass | 713 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 0 |
import unittest
import numpy as np
import requests
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
from transformers.pytorch_utils import is_torch_greater_or_equal_than_1_11
else:
_lowercase = False
if is_vision_available():
from PIL import Image
from transformers import PixaStructImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : int ,A_ : Optional[int]=7 ,A_ : List[str]=3 ,A_ : Tuple=18 ,A_ : List[Any]=30 ,A_ : Tuple=400 ,A_ : Any=None ,A_ : Optional[int]=True ,A_ : List[str]=True ,A_ : Union[str, Any]=None ,) -> List[Any]:
A = size if size is not None else {'height': 20, 'width': 20}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = size
A = do_normalize
A = do_convert_rgb
A = [512, 1024, 2048, 4096]
A = patch_size if patch_size is not None else {'height': 16, 'width': 16}
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
return {"do_normalize": self.do_normalize, "do_convert_rgb": self.do_convert_rgb}
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Tuple:
A = 'https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/transformers/tasks/australia.jpg'
A = Image.open(requests.get(__lowercase ,stream=__lowercase ).raw ).convert('RGB' )
return raw_image
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Any = PixaStructImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
A = PixaStructImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase ,'do_normalize' ) )
self.assertTrue(hasattr(__lowercase ,'do_convert_rgb' ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
A = self.image_processor_tester.prepare_dummy_image()
A = self.image_processing_class(**self.image_processor_dict )
A = 2048
A = image_processor(__lowercase ,return_tensors='pt' ,max_patches=__lowercase )
self.assertTrue(torch.allclose(inputs.flattened_patches.mean() ,torch.tensor(0.06_06 ) ,atol=1e-3 ,rtol=1e-3 ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,Image.Image )
# Test not batched input
A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
A = image_processor(
__lowercase ,return_tensors='pt' ,max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,Image.Image )
# Test not batched input
A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
A = True
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
with self.assertRaises(__lowercase ):
A = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=__lowercase ).flattened_patches
A = 'Hello'
A = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=__lowercase ,header_text=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
A = image_processor(
__lowercase ,return_tensors='pt' ,max_patches=__lowercase ,header_text=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase ,numpify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,np.ndarray )
A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
A = image_processor(
__lowercase ,return_tensors='pt' ,max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase ,torchify=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,torch.Tensor )
# Test not batched input
A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* self.image_processor_tester.num_channels
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
A = image_processor(
__lowercase ,return_tensors='pt' ,max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,)
@unittest.skipIf(
not is_torch_greater_or_equal_than_1_11 , reason='''`Pix2StructImageProcessor` requires `torch>=1.11.0`.''' , )
@require_torch
@require_vision
class lowerCAmelCase_ ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = PixaStructImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = PixaStructImageProcessingTester(self ,num_channels=4 )
A = 3
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__lowercase ,'do_normalize' ) )
self.assertTrue(hasattr(__lowercase ,'do_convert_rgb' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
# Initialize image_processor
A = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A = prepare_image_inputs(self.image_processor_tester ,equal_resolution=__lowercase )
for image in image_inputs:
self.assertIsInstance(__lowercase ,Image.Image )
# Test not batched input
A = (
(self.image_processor_tester.patch_size['height'] * self.image_processor_tester.patch_size['width'])
* (self.image_processor_tester.num_channels - 1)
) + 2
for max_patch in self.image_processor_tester.max_patches:
# Test not batched input
A = image_processor(
image_inputs[0] ,return_tensors='pt' ,max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(1, max_patch, expected_hidden_dim) ,)
# Test batched
A = image_processor(
__lowercase ,return_tensors='pt' ,max_patches=__lowercase ).flattened_patches
self.assertEqual(
encoded_images.shape ,(self.image_processor_tester.batch_size, max_patch, expected_hidden_dim) ,) | 714 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) ) | 22 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCAmelCase_ ( __snake_case ):
'''simple docstring'''
_lowerCamelCase: Any = 'Wav2Vec2FeatureExtractor'
_lowerCamelCase: List[str] = 'AutoTokenizer'
def __init__( self : int ,A_ : Dict ,A_ : str ) -> int:
super().__init__(A_ ,A_ )
A = self.feature_extractor
A = False
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ,A_ : Optional[Any] ,**A_ : List[Any] ) -> List[str]:
try:
return super().from_pretrained(A_ ,**A_ )
except OSError:
warnings.warn(
F'Loading a tokenizer inside {cls.__name__} from a config that does not'
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' ,A_ ,)
A = WavaVecaFeatureExtractor.from_pretrained(A_ ,**A_ )
A = WavaVecaCTCTokenizer.from_pretrained(A_ ,**A_ )
return cls(feature_extractor=A_ ,tokenizer=A_ )
def __call__( self : Dict ,*A_ : Tuple ,**A_ : List[str] ) -> List[str]:
if self._in_target_context_manager:
return self.current_processor(*A_ ,**A_ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
A = kwargs.pop('raw_speech' )
else:
A = kwargs.pop('audio' ,A_ )
A = kwargs.pop('sampling_rate' ,A_ )
A = kwargs.pop('text' ,A_ )
if len(A_ ) > 0:
A = args[0]
A = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
A = self.feature_extractor(A_ ,*A_ ,sampling_rate=A_ ,**A_ )
if text is not None:
A = self.tokenizer(A_ ,**A_ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A = encodings["input_ids"]
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ,*A_ : Dict ,**A_ : Dict ) -> List[str]:
if self._in_target_context_manager:
return self.current_processor.pad(*A_ ,**A_ )
A = kwargs.pop('input_features' ,A_ )
A = kwargs.pop('labels' ,A_ )
if len(A_ ) > 0:
A = args[0]
A = args[1:]
if input_features is not None:
A = self.feature_extractor.pad(A_ ,*A_ ,**A_ )
if labels is not None:
A = self.tokenizer.pad(A_ ,**A_ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A = labels["input_ids"]
return input_features
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,*A_ : str ,**A_ : Optional[int] ) -> Optional[Any]:
return self.tokenizer.batch_decode(*A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,*A_ : Any ,**A_ : List[Any] ) -> Any:
return self.tokenizer.decode(*A_ ,**A_ )
@contextmanager
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
A = True
A = self.tokenizer
yield
A = self.feature_extractor
A = False | 715 |
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
_lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
_lowercase = '''|'''.join(sys.argv[1:])
_lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""")
_lowercase = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''') | 22 | 0 |
"""simple docstring"""
import logging
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import arg_to_scheduler
from transformers import TrainingArguments
_lowercase = logging.getLogger(__name__)
@dataclass
class lowerCAmelCase_ ( UpperCamelCase__ ):
'''simple docstring'''
_lowerCamelCase: Optional[float] = field(
default=0.0 , metadata={'''help''': '''The label smoothing epsilon to apply (if not zero).'''} )
_lowerCamelCase: bool = field(default=UpperCamelCase__ , metadata={'''help''': '''Whether to SortishSamler or not.'''} )
_lowerCamelCase: bool = field(
default=UpperCamelCase__ , metadata={'''help''': '''Whether to use generate to calculate generative metrics (ROUGE, BLEU).'''} )
_lowerCamelCase: bool = field(default=UpperCamelCase__ , metadata={'''help''': '''whether to use adafactor'''} )
_lowerCamelCase: Optional[float] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Encoder layer dropout probability. Goes into model.config.'''} )
_lowerCamelCase: Optional[float] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Decoder layer dropout probability. Goes into model.config.'''} )
_lowerCamelCase: Optional[float] = field(default=UpperCamelCase__ , metadata={'''help''': '''Dropout probability. Goes into model.config.'''} )
_lowerCamelCase: Optional[float] = field(
default=UpperCamelCase__ , metadata={'''help''': '''Attention dropout probability. Goes into model.config.'''} )
_lowerCamelCase: Optional[str] = field(
default='''linear''' , metadata={'''help''': F'Which lr scheduler to use. Selected in {sorted(arg_to_scheduler.keys() )}'} , ) | 716 |
"""simple docstring"""
import sys
from collections import defaultdict
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> int:
A = []
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]:
return self.node_position[vertex]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]:
A = pos
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A = 2 * start + 1
else:
A = 2 * start + 2
if heap[smallest_child] < heap[start]:
A , A = heap[smallest_child], positions[smallest_child]
A , A = (
heap[start],
positions[start],
)
A , A = temp, tempa
A = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,A_ )
self.top_to_bottom(A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict:
A = position[index]
while index != 0:
A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A = heap[parent]
A = position[parent]
self.set_position(position[parent] ,A_ )
else:
A = val
A = temp
self.set_position(A_ ,A_ )
break
A = parent
else:
A = val
A = temp
self.set_position(A_ ,0 )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]:
A = len(A_ ) // 2 - 1
for i in range(A_ ,-1 ,-1 ):
self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]:
A = positions[0]
A = sys.maxsize
self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ )
return temp
def _snake_case ( snake_case__ : Dict ):
A = Heap()
A = [0] * len(snake_case__ )
A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A = [] # Heap of Distance of vertices from their neighboring vertex
A = []
for vertex in range(len(snake_case__ ) ):
distance_tv.append(sys.maxsize )
positions.append(snake_case__ )
heap.node_position.append(snake_case__ )
A = []
A = 1
A = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A = 0
A = distance
heap.heapify(snake_case__ , snake_case__ )
for _ in range(1 , len(snake_case__ ) ):
A = heap.delete_minimum(snake_case__ , snake_case__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(snake_case__ )]
):
A = distance
heap.bottom_to_top(
snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ )
A = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase = int(input('''Enter number of edges: ''').strip())
_lowercase = defaultdict(list)
for _ in range(edges_number):
_lowercase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 22 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
_lowercase = '''\n Examples:\n ```py\n >>> from diffusers import KandinskyV22Img2ImgPipeline, KandinskyV22PriorPipeline\n >>> from diffusers.utils import load_image\n >>> import torch\n\n >>> pipe_prior = KandinskyV22PriorPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-prior", torch_dtype=torch.float16\n ... )\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "A red cartoon frog, 4k"\n >>> image_emb, zero_image_emb = pipe_prior(prompt, return_dict=False)\n\n >>> pipe = KandinskyV22Img2ImgPipeline.from_pretrained(\n ... "kandinsky-community/kandinsky-2-2-decoder", torch_dtype=torch.float16\n ... )\n >>> pipe.to("cuda")\n\n >>> init_image = load_image(\n ... "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"\n ... "/kandinsky/frog.png"\n ... )\n\n >>> image = pipe(\n ... image=init_image,\n ... image_embeds=image_emb,\n ... negative_image_embeds=zero_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... strength=0.2,\n ... ).images\n\n >>> image[0].save("red_frog.png")\n ```\n'''
def _snake_case ( snake_case__ : Any , snake_case__ : int , snake_case__ : Any=8 ):
A = height // scale_factor**2
if height % scale_factor**2 != 0:
new_height += 1
A = width // scale_factor**2
if width % scale_factor**2 != 0:
new_width += 1
return new_height * scale_factor, new_width * scale_factor
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Optional[int]=512 , snake_case__ : int=512 ):
A = pil_image.resize((w, h) , resample=Image.BICUBIC , reducing_gap=1 )
A = np.array(pil_image.convert('RGB' ) )
A = arr.astype(np.floataa ) / 127.5 - 1
A = np.transpose(__lowerCAmelCase , [2, 0, 1] )
A = torch.from_numpy(__lowerCAmelCase ).unsqueeze(0 )
return image
class lowerCAmelCase_ ( UpperCAmelCase__ ):
'''simple docstring'''
def __init__( self : int ,A_ : UNetaDConditionModel ,A_ : DDPMScheduler ,A_ : VQModel ,) -> Union[str, Any]:
super().__init__()
self.register_modules(
unet=lowerCamelCase__ ,scheduler=lowerCamelCase__ ,movq=lowerCamelCase__ ,)
A = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : int ,A_ : Tuple ,A_ : Union[str, Any] ) -> Union[str, Any]:
A = min(int(num_inference_steps * strength ) ,lowerCamelCase__ )
A = max(num_inference_steps - init_timestep ,0 )
A = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Tuple ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : Dict=None ) -> Any:
if not isinstance(lowerCamelCase__ ,(torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
F'`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(lowerCamelCase__ )}' )
A = image.to(device=lowerCamelCase__ ,dtype=lowerCamelCase__ )
A = batch_size * num_images_per_prompt
if image.shape[1] == 4:
A = image
else:
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ) and len(lowerCamelCase__ ) != batch_size:
raise ValueError(
F'You have passed a list of generators of length {len(lowerCamelCase__ )}, but requested an effective batch'
F' size of {batch_size}. Make sure the batch size matches the length of the generators.' )
elif isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
A = [
self.movq.encode(image[i : i + 1] ).latent_dist.sample(generator[i] ) for i in range(lowerCamelCase__ )
]
A = torch.cat(lowerCamelCase__ ,dim=0 )
else:
A = self.movq.encode(lowerCamelCase__ ).latent_dist.sample(lowerCamelCase__ )
A = self.movq.config.scaling_factor * init_latents
A = torch.cat([init_latents] ,dim=0 )
A = init_latents.shape
A = randn_tensor(lowerCamelCase__ ,generator=lowerCamelCase__ ,device=lowerCamelCase__ ,dtype=lowerCamelCase__ )
# get latents
A = self.scheduler.add_noise(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
A = init_latents
return latents
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[Any]=0 ) -> str:
if is_accelerate_available():
from accelerate import cpu_offload
else:
raise ImportError('Please install accelerate via `pip install accelerate`' )
A = torch.device(F'cuda:{gpu_id}' )
A = [
self.unet,
self.movq,
]
for cpu_offloaded_model in models:
if cpu_offloaded_model is not None:
cpu_offload(lowerCamelCase__ ,lowerCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=0 ) -> List[Any]:
if is_accelerate_available() and is_accelerate_version('>=' ,'0.17.0.dev0' ):
from accelerate import cpu_offload_with_hook
else:
raise ImportError('`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.' )
A = torch.device(F'cuda:{gpu_id}' )
if self.device.type != "cpu":
self.to('cpu' ,silence_dtype_warnings=lowerCamelCase__ )
torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
A = None
for cpu_offloaded_model in [self.unet, self.movq]:
A = cpu_offload_with_hook(lowerCamelCase__ ,lowerCamelCase__ ,prev_module_hook=lowerCamelCase__ )
# We'll offload the last model manually.
A = hook
@property
# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
if not hasattr(self.unet ,'_hf_hook' ):
return self.device
for module in self.unet.modules():
if (
hasattr(lowerCamelCase__ ,'_hf_hook' )
and hasattr(module._hf_hook ,'execution_device' )
and module._hf_hook.execution_device is not None
):
return torch.device(module._hf_hook.execution_device )
return self.device
@torch.no_grad()
@replace_example_docstring(lowerCamelCase__ )
def __call__( self : str ,A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,A_ : Union[torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]] ,A_ : Union[torch.FloatTensor, List[torch.FloatTensor]] ,A_ : int = 512 ,A_ : int = 512 ,A_ : int = 100 ,A_ : float = 4.0 ,A_ : float = 0.3 ,A_ : int = 1 ,A_ : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,A_ : Optional[str] = "pil" ,A_ : bool = True ,) -> str:
A = self._execution_device
A = guidance_scale > 1.0
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
A = torch.cat(lowerCamelCase__ ,dim=0 )
A = image_embeds.shape[0]
if isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
A = torch.cat(lowerCamelCase__ ,dim=0 )
if do_classifier_free_guidance:
A = image_embeds.repeat_interleave(lowerCamelCase__ ,dim=0 )
A = negative_image_embeds.repeat_interleave(lowerCamelCase__ ,dim=0 )
A = torch.cat([negative_image_embeds, image_embeds] ,dim=0 ).to(dtype=self.unet.dtype ,device=lowerCamelCase__ )
if not isinstance(lowerCamelCase__ ,lowerCamelCase__ ):
A = [image]
if not all(isinstance(lowerCamelCase__ ,(PIL.Image.Image, torch.Tensor) ) for i in image ):
raise ValueError(
F'Input is in incorrect format: {[type(lowerCamelCase__ ) for i in image]}. Currently, we only support PIL image and pytorch tensor' )
A = torch.cat([prepare_image(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ) for i in image] ,dim=0 )
A = image.to(dtype=image_embeds.dtype ,device=lowerCamelCase__ )
A = self.movq.encode(lowerCamelCase__ )["latents"]
A = latents.repeat_interleave(lowerCamelCase__ ,dim=0 )
self.scheduler.set_timesteps(lowerCamelCase__ ,device=lowerCamelCase__ )
A = self.get_timesteps(lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ )
A = timesteps[:1].repeat(batch_size * num_images_per_prompt )
A = downscale_height_and_width(lowerCamelCase__ ,lowerCamelCase__ ,self.movq_scale_factor )
A = self.prepare_latents(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,image_embeds.dtype ,lowerCamelCase__ ,lowerCamelCase__ )
for i, t in enumerate(self.progress_bar(lowerCamelCase__ ) ):
# expand the latents if we are doing classifier free guidance
A = torch.cat([latents] * 2 ) if do_classifier_free_guidance else latents
A = {"image_embeds": image_embeds}
A = self.unet(
sample=lowerCamelCase__ ,timestep=lowerCamelCase__ ,encoder_hidden_states=lowerCamelCase__ ,added_cond_kwargs=lowerCamelCase__ ,return_dict=lowerCamelCase__ ,)[0]
if do_classifier_free_guidance:
A = noise_pred.split(latents.shape[1] ,dim=1 )
A = noise_pred.chunk(2 )
A = variance_pred.chunk(2 )
A = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
A = torch.cat([noise_pred, variance_pred_text] ,dim=1 )
if not (
hasattr(self.scheduler.config ,'variance_type' )
and self.scheduler.config.variance_type in ["learned", "learned_range"]
):
A = noise_pred.split(latents.shape[1] ,dim=1 )
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(
lowerCamelCase__ ,lowerCamelCase__ ,lowerCamelCase__ ,generator=lowerCamelCase__ ,)[0]
# post-processing
A = self.movq.decode(lowerCamelCase__ ,force_not_quantize=lowerCamelCase__ )["sample"]
if output_type not in ["pt", "np", "pil"]:
raise ValueError(F'Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}' )
if output_type in ["np", "pil"]:
A = image * 0.5 + 0.5
A = image.clamp(0 ,1 )
A = image.cpu().permute(0 ,2 ,3 ,1 ).float().numpy()
if output_type == "pil":
A = self.numpy_to_pil(lowerCamelCase__ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=lowerCamelCase__ )
| 717 |
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase = True
except ImportError:
_lowercase = False
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( snake_case__ : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ : str ,A_ : str ,A_ : List[str] ):
# Create temp file
A , A = mkstemp()
A = False
with fdopen(A_ ,'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ ,A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ ,A_ )
def skip_units(A_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ : Tuple ):
with open(A_ ) as datafile:
A = []
A = False
A = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ ,A_ ,A_ )
A = []
elif "# Replace with" in line and "##" not in line:
A = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ ) | 22 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SegformerConfig,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : List[Any] , snake_case__ : Any=False ):
A = OrderedDict()
for key, value in state_dict.items():
if encoder_only and not key.startswith('head' ):
A = 'segformer.encoder.' + key
if key.startswith('backbone' ):
A = key.replace('backbone' , 'segformer.encoder' )
if "patch_embed" in key:
# replace for example patch_embed1 by patch_embeddings.0
A = key[key.find('patch_embed' ) + len('patch_embed' )]
A = key.replace(F'patch_embed{idx}' , F'patch_embeddings.{int(lowercase__ )-1}' )
if "norm" in key:
A = key.replace('norm' , 'layer_norm' )
if "segformer.encoder.layer_norm" in key:
# replace for example layer_norm1 by layer_norm.0
A = key[key.find('segformer.encoder.layer_norm' ) + len('segformer.encoder.layer_norm' )]
A = key.replace(F'layer_norm{idx}' , F'layer_norm.{int(lowercase__ )-1}' )
if "layer_norm1" in key:
A = key.replace('layer_norm1' , 'layer_norm_1' )
if "layer_norm2" in key:
A = key.replace('layer_norm2' , 'layer_norm_2' )
if "block" in key:
# replace for example block1 by block.0
A = key[key.find('block' ) + len('block' )]
A = key.replace(F'block{idx}' , F'block.{int(lowercase__ )-1}' )
if "attn.q" in key:
A = key.replace('attn.q' , 'attention.self.query' )
if "attn.proj" in key:
A = key.replace('attn.proj' , 'attention.output.dense' )
if "attn" in key:
A = key.replace('attn' , 'attention.self' )
if "fc1" in key:
A = key.replace('fc1' , 'dense1' )
if "fc2" in key:
A = key.replace('fc2' , 'dense2' )
if "linear_pred" in key:
A = key.replace('linear_pred' , 'classifier' )
if "linear_fuse" in key:
A = key.replace('linear_fuse.conv' , 'linear_fuse' )
A = key.replace('linear_fuse.bn' , 'batch_norm' )
if "linear_c" in key:
# replace for example linear_c4 by linear_c.3
A = key[key.find('linear_c' ) + len('linear_c' )]
A = key.replace(F'linear_c{idx}' , F'linear_c.{int(lowercase__ )-1}' )
if key.startswith('head' ):
A = key.replace('head' , 'classifier' )
A = value
return new_state_dict
def _snake_case ( snake_case__ : Dict , snake_case__ : List[str] ):
for i in range(config.num_encoder_blocks ):
for j in range(config.depths[i] ):
# read in weights + bias of keys and values (which is a single matrix in the original implementation)
A = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.weight' )
A = state_dict.pop(F'segformer.encoder.block.{i}.{j}.attention.self.kv.bias' )
# next, add keys and values (in that order) to the state dict
A = kv_weight[
: config.hidden_sizes[i], :
]
A = kv_bias[: config.hidden_sizes[i]]
A = kv_weight[
config.hidden_sizes[i] :, :
]
A = kv_bias[
config.hidden_sizes[i] :
]
def _snake_case ( ):
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return image
@torch.no_grad()
def _snake_case ( snake_case__ : List[str] , snake_case__ : int , snake_case__ : Optional[int] ):
A = SegformerConfig()
A = False
# set attributes based on model_name
A = 'huggingface/label-files'
if "segformer" in model_name:
A = model_name[len('segformer.' ) : len('segformer.' ) + 2]
if "ade" in model_name:
A = 150
A = 'ade20k-id2label.json'
A = (1, 150, 128, 128)
elif "city" in model_name:
A = 19
A = 'cityscapes-id2label.json'
A = (1, 19, 128, 128)
else:
raise ValueError(F'Model {model_name} not supported' )
elif "mit" in model_name:
A = True
A = model_name[4:6]
A = 1000
A = 'imagenet-1k-id2label.json'
A = (1, 1000)
else:
raise ValueError(F'Model {model_name} not supported' )
# set config attributes
A = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type='dataset' ) , 'r' ) )
A = {int(lowercase__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
if size == "b0":
pass
elif size == "b1":
A = [64, 128, 320, 512]
A = 256
elif size == "b2":
A = [64, 128, 320, 512]
A = 768
A = [3, 4, 6, 3]
elif size == "b3":
A = [64, 128, 320, 512]
A = 768
A = [3, 4, 18, 3]
elif size == "b4":
A = [64, 128, 320, 512]
A = 768
A = [3, 8, 27, 3]
elif size == "b5":
A = [64, 128, 320, 512]
A = 768
A = [3, 6, 40, 3]
else:
raise ValueError(F'Size {size} not supported' )
# load image processor (only resize + normalize)
A = SegformerImageProcessor(
image_scale=(512, 512) , keep_ratio=lowercase__ , align=lowercase__ , do_random_crop=lowercase__ )
# prepare image
A = prepare_img()
A = image_processor(images=lowercase__ , return_tensors='pt' ).pixel_values
logger.info(F'Converting model {model_name}...' )
# load original state dict
if encoder_only:
A = torch.load(lowercase__ , map_location=torch.device('cpu' ) )
else:
A = torch.load(lowercase__ , map_location=torch.device('cpu' ) )['state_dict']
# rename keys
A = rename_keys(lowercase__ , encoder_only=lowercase__ )
if not encoder_only:
del state_dict["decode_head.conv_seg.weight"]
del state_dict["decode_head.conv_seg.bias"]
# key and value matrices need special treatment
read_in_k_v(lowercase__ , lowercase__ )
# create HuggingFace model and load state dict
if encoder_only:
A = False
A = SegformerForImageClassification(lowercase__ )
else:
A = SegformerForSemanticSegmentation(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
# forward pass
A = model(lowercase__ )
A = outputs.logits
# set expected_slice based on model name
# ADE20k checkpoints
if model_name == "segformer.b0.512x512.ade.160k":
A = torch.tensor(
[
[[-4.6310, -5.5232, -6.2356], [-5.1921, -6.1444, -6.5996], [-5.4424, -6.2790, -6.7574]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] )
elif model_name == "segformer.b1.512x512.ade.160k":
A = torch.tensor(
[
[[-7.5820, -8.7231, -8.3215], [-8.0600, -10.3529, -10.0304], [-7.5208, -9.4103, -9.6239]],
[[-12.6918, -13.8994, -13.7137], [-13.3196, -15.7523, -15.4789], [-12.9343, -14.8757, -14.9689]],
[[-11.1911, -11.9421, -11.3243], [-11.3342, -13.6839, -13.3581], [-10.3909, -12.1832, -12.4858]],
] )
elif model_name == "segformer.b2.512x512.ade.160k":
A = torch.tensor(
[
[[-11.8173, -14.3850, -16.3128], [-14.5648, -16.5804, -18.6568], [-14.7223, -15.7387, -18.4218]],
[[-15.7290, -17.9171, -19.4423], [-18.3105, -19.9448, -21.4661], [-17.9296, -18.6497, -20.7910]],
[[-15.0783, -17.0336, -18.2789], [-16.8771, -18.6870, -20.1612], [-16.2454, -17.1426, -19.5055]],
] )
elif model_name == "segformer.b3.512x512.ade.160k":
A = torch.tensor(
[
[[-9.0878, -10.2081, -10.1891], [-9.3144, -10.7941, -10.9843], [-9.2294, -10.3855, -10.5704]],
[[-12.2316, -13.9068, -13.6102], [-12.9161, -14.3702, -14.3235], [-12.5233, -13.7174, -13.7932]],
[[-14.6275, -15.2490, -14.9727], [-14.3400, -15.9687, -16.2827], [-14.1484, -15.4033, -15.8937]],
] )
elif model_name == "segformer.b4.512x512.ade.160k":
A = torch.tensor(
[
[[-12.3144, -13.2447, -14.0802], [-13.3614, -14.5816, -15.6117], [-13.3340, -14.4433, -16.2219]],
[[-19.2781, -20.4128, -20.7506], [-20.6153, -21.6566, -22.0998], [-19.9800, -21.0430, -22.1494]],
[[-18.8739, -19.7804, -21.1834], [-20.1233, -21.6765, -23.2944], [-20.0315, -21.2641, -23.6944]],
] )
elif model_name == "segformer.b5.640x640.ade.160k":
A = torch.tensor(
[
[[-9.5524, -12.0835, -11.7348], [-10.5229, -13.6446, -14.5662], [-9.5842, -12.8851, -13.9414]],
[[-15.3432, -17.5323, -17.0818], [-16.3330, -18.9255, -19.2101], [-15.1340, -17.7848, -18.3971]],
[[-12.6072, -14.9486, -14.6631], [-13.7629, -17.0907, -17.7745], [-12.7899, -16.1695, -17.1671]],
] )
# Cityscapes checkpoints
elif model_name == "segformer.b0.1024x1024.city.160k":
A = torch.tensor(
[
[[-11.9295, -13.4057, -14.8106], [-13.3431, -14.8179, -15.3781], [-14.2836, -15.5942, -16.1588]],
[[-11.4906, -12.8067, -13.6564], [-13.1189, -14.0500, -14.1543], [-13.8748, -14.5136, -14.8789]],
[[0.5374, 0.1067, -0.4742], [0.1141, -0.2255, -0.7099], [-0.3000, -0.5924, -1.3105]],
] )
elif model_name == "segformer.b0.512x1024.city.160k":
A = torch.tensor(
[
[[-7.8217, -9.8767, -10.1717], [-9.4438, -10.9058, -11.4047], [-9.7939, -12.3495, -12.1079]],
[[-7.1514, -9.5336, -10.0860], [-9.7776, -11.6822, -11.8439], [-10.1411, -12.7655, -12.8972]],
[[0.3021, 0.0805, -0.2310], [-0.0328, -0.1605, -0.2714], [-0.1408, -0.5477, -0.6976]],
] )
elif model_name == "segformer.b0.640x1280.city.160k":
A = torch.tensor(
[
[
[-1.13_72e01, -1.27_87e01, -1.34_77e01],
[-1.25_36e01, -1.41_94e01, -1.44_09e01],
[-1.32_17e01, -1.48_88e01, -1.53_27e01],
],
[
[-1.47_91e01, -1.71_22e01, -1.82_77e01],
[-1.71_63e01, -1.91_92e01, -1.95_33e01],
[-1.78_97e01, -1.99_91e01, -2.03_15e01],
],
[
[7.67_23e-01, 4.19_21e-01, -7.78_78e-02],
[4.77_72e-01, 9.55_57e-03, -2.80_82e-01],
[3.60_32e-01, -2.48_26e-01, -5.11_68e-01],
],
] )
elif model_name == "segformer.b0.768x768.city.160k":
A = torch.tensor(
[
[[-9.4959, -11.3087, -11.7479], [-11.0025, -12.6540, -12.3319], [-11.4064, -13.0487, -12.9905]],
[[-9.8905, -11.3084, -12.0854], [-11.1726, -12.7698, -12.9583], [-11.5985, -13.3278, -14.1774]],
[[0.2213, 0.0192, -0.2466], [-0.1731, -0.4213, -0.4874], [-0.3126, -0.6541, -1.1389]],
] )
elif model_name == "segformer.b1.1024x1024.city.160k":
A = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.6456, -3.0209, -1.4203], [-3.0797, -3.1959, -2.0000], [-1.8757, -1.9217, -1.6997]],
] )
elif model_name == "segformer.b2.1024x1024.city.160k":
A = torch.tensor(
[
[[-16.0976, -16.4856, -17.3962], [-16.6234, -19.0342, -19.7685], [-16.0900, -18.0661, -19.1180]],
[[-18.4750, -18.8488, -19.5074], [-19.4030, -22.1570, -22.5977], [-19.1191, -20.8486, -22.3783]],
[[-4.5178, -5.5037, -6.5109], [-5.0884, -7.2174, -8.0334], [-4.4156, -5.8117, -7.2970]],
] )
elif model_name == "segformer.b3.1024x1024.city.160k":
A = torch.tensor(
[
[[-14.2081, -14.4732, -14.1977], [-14.5867, -16.4423, -16.6356], [-13.4441, -14.9685, -16.8696]],
[[-14.4576, -14.7073, -15.0451], [-15.0816, -17.6237, -17.9873], [-14.4213, -16.0199, -18.5992]],
[[-4.7349, -4.9588, -5.0966], [-4.3210, -6.9325, -7.2591], [-3.4312, -4.7484, -7.1917]],
] )
elif model_name == "segformer.b4.1024x1024.city.160k":
A = torch.tensor(
[
[[-11.7737, -11.9526, -11.3273], [-13.6692, -14.4574, -13.8878], [-13.8937, -14.6924, -15.9345]],
[[-14.6706, -14.5330, -14.1306], [-16.1502, -16.8180, -16.4269], [-16.8338, -17.8939, -20.1746]],
[[1.0491, 0.8289, 1.0310], [1.1044, 0.5219, 0.8055], [1.0899, 0.6926, 0.5590]],
] )
elif model_name == "segformer.b5.1024x1024.city.160k":
A = torch.tensor(
[
[[-12.5641, -13.4777, -13.0684], [-13.9587, -15.8983, -16.6557], [-13.3109, -15.7350, -16.3141]],
[[-14.7074, -15.4352, -14.5944], [-16.6353, -18.1663, -18.6120], [-15.1702, -18.0329, -18.1547]],
[[-1.7990, -2.0951, -1.7784], [-2.6397, -3.8245, -3.9686], [-1.5264, -2.8126, -2.9316]],
] )
else:
A = logits.argmax(-1 ).item()
print('Predicted class:' , model.config.idalabel[predicted_class_idx] )
# verify logits
if not encoder_only:
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3, :3, :3] , lowercase__ , atol=1e-2 )
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...' )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''segformer.b0.512x512.ade.160k''',
type=str,
help='''Name of the model you\'d like to convert.''',
)
parser.add_argument(
'''--checkpoint_path''', default=None, type=str, help='''Path to the original PyTorch checkpoint (.pth file).'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_segformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 718 |
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
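# The tests below exercise ImageGPTImageProcessor, which resizes and normalizes an
# image to [-1, 1] and then color-quantizes every pixel to its nearest cluster,
# producing a sequence of cluster ids (`input_ids`) instead of pixel values.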
class ImageGPTImageProcessingTester(unittest.TestCase):
    '''simple docstring'''

    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
        return {
            # here we create 2 clusters for the sake of simplicity; they are RGB
            # centroids in the normalized [-1, 1] color space used for quantization
            "clusters": np.asarray(
                [
                    [0.8866443634033203, 0.6618829369544983, 0.3891746401786804],
                    [-0.6042559146881104, -0.02295008860528469, 0.5423797369003296],
                ]
            ),
            "do_resize": self.do_resize,
            "size": self.size,
            "do_normalize": self.do_normalize,
        }
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ImageGPTImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'clusters' ) )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
A = self.image_processing_class(**self.image_processor_dict )
A = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,obj[key] ) )
else:
self.assertEqual(obj[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(A_ ,'image_processor.json' )
image_processor_first.to_json_file(A_ )
A = self.image_processing_class.from_json_file(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(A_ )
A = self.image_processing_class.from_pretrained(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
pass
def _snake_case ( ):
A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
A = Image.open(dataset[4]['file'] )
A = Image.open(dataset[5]['file'] )
A = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
A = prepare_images()
# test non-batched
A = image_processing(images[0] ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
A = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ )
# test batched
A = image_processing(A_ ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
A = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ ) | 22 | 0 |
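# A rough sketch of the quantization those tests exercise: ImageGPT's image
# processor assigns each (normalized) pixel to its nearest color cluster and
# returns the cluster indices as input_ids. The clusters mirror the two toy
# centers defined above; the pixel values are made up.
import numpy as np

clusters = np.asarray([[0.88, 0.66, 0.38], [-0.60, -0.02, 0.54]])  # (n_clusters, 3)
pixels = np.asarray([[0.9, 0.7, 0.4], [-0.5, 0.0, 0.5]])  # (n_pixels, 3)
distances = ((pixels[:, None, :] - clusters[None, :, :]) ** 2).sum(-1)
input_ids = distances.argmin(axis=1)  # nearest-cluster index per pixel
assert input_ids.tolist() == [0, 1]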
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'uclanlp/visualbert-vqa': 'https://huggingface.co/uclanlp/visualbert-vqa/resolve/main/config.json',
'uclanlp/visualbert-vqa-pre': 'https://huggingface.co/uclanlp/visualbert-vqa-pre/resolve/main/config.json',
'uclanlp/visualbert-vqa-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vqa-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-vcr': 'https://huggingface.co/uclanlp/visualbert-vcr/resolve/main/config.json',
'uclanlp/visualbert-vcr-pre': 'https://huggingface.co/uclanlp/visualbert-vcr-pre/resolve/main/config.json',
'uclanlp/visualbert-vcr-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-vcr-coco-pre/resolve/main/config.json'
),
'uclanlp/visualbert-nlvr2': 'https://huggingface.co/uclanlp/visualbert-nlvr2/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-pre': 'https://huggingface.co/uclanlp/visualbert-nlvr2-pre/resolve/main/config.json',
'uclanlp/visualbert-nlvr2-coco-pre': (
'https://huggingface.co/uclanlp/visualbert-nlvr2-coco-pre/resolve/main/config.json'
)
# See all VisualBERT models at https://huggingface.co/models?filter=visual_bert
}
class lowerCAmelCase_ ( __lowercase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''visual_bert'''
def __init__( self : Optional[Any] ,A_ : Union[str, Any]=3_0522 ,A_ : Tuple=768 ,A_ : Dict=512 ,A_ : Dict=12 ,A_ : Any=12 ,A_ : Optional[Any]=3072 ,A_ : Tuple="gelu" ,A_ : List[Any]=0.1 ,A_ : Tuple=0.1 ,A_ : Optional[int]=512 ,A_ : str=2 ,A_ : List[Any]=0.02 ,A_ : List[Any]=1e-12 ,A_ : List[str]=False ,A_ : Dict=True ,A_ : Optional[int]=1 ,A_ : str=0 ,A_ : Dict=2 ,**A_ : Union[str, Any] ,) -> Optional[int]:
super().__init__(pad_token_id=__a ,bos_token_id=__a ,eos_token_id=__a ,**__a )
A = vocab_size
A = max_position_embeddings
A = hidden_size
A = visual_embedding_dim
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = type_vocab_size
A = layer_norm_eps
A = bypass_transformer
A = special_visual_initialize | 719 |
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _snake_case ( snake_case__ : Optional[int] ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' )
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' )
download_parser.set_defaults(func=A_ )
def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]:
A = model
A = cache
A = force
A = trust_remote_code
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code ) | 22 | 0 |
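# The registration pattern above in miniature: each command attaches its own
# subparser, binds a factory through set_defaults(func=...), and the entry point
# dispatches on args.func. Everything below is a self-contained toy, not the real
# transformers-cli wiring.
from argparse import ArgumentParser

def _build_greet(args):
    return lambda: print(f"hello, {args.name}")

_parser = ArgumentParser()
_subparsers = _parser.add_subparsers()
_greet = _subparsers.add_parser("greet")
_greet.add_argument("name")
_greet.set_defaults(func=_build_greet)
_args = _parser.parse_args(["greet", "world"])
_args.func(_args)()  # prints: hello, world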
"""simple docstring"""
from __future__ import annotations
def _snake_case ( snake_case__ : list[int] ):
    return len(set(snake_case__ ) ) == len(snake_case__ )
if __name__ == "__main__":
import doctest
doctest.testmod() | 720 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spm_char.model'''}
_lowercase = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
_lowercase = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Any:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]:
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
A = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
A = [1]
if token_ids_a is None:
return ([0] * len(A_ )) + suffix_ones
return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,) | 22 | 0 |
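# What get_special_tokens_mask above computes, in plain lists: every regular
# token maps to 0 and the single appended </s> maps to 1, so a pair concatenates
# both zero runs before the trailing one. The token ids are made up.
token_ids_demo_a = [17, 4, 9]
token_ids_demo_b = [5, 6]
assert [0] * len(token_ids_demo_a) + [1] == [0, 0, 0, 1]
assert [0] * len(token_ids_demo_a) + [0] * len(token_ids_demo_b) + [1] == [0, 0, 0, 0, 0, 1]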
"""simple docstring"""
from typing import Dict, List, Optional, Union
import numpy as np
from .feature_extraction_utils import BatchFeature, FeatureExtractionMixin
from .utils import PaddingStrategy, TensorType, is_tf_tensor, is_torch_tensor, logging, to_numpy
_lowercase = logging.get_logger(__name__)
class __lowercase ( __a ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ,**A_ : Any ) -> Union[str, Any]:
A = feature_size
A = sampling_rate
A = padding_value
A = kwargs.pop('padding_side' ,'right' )
A = kwargs.pop('return_attention_mask' ,A__ )
super().__init__(**A__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[str] = True ,A_ : Optional[Any] = None ,A_ : Union[str, Any] = False ,A_ : List[Any] = None ,A_ : Union[str, Any] = None ,A_ : str = None ,) -> BatchFeature:
# If we have a list of dicts, let's convert it in a dict of lists
# We do this to allow using this method as a collate_fn function in PyTorch Dataloader
if isinstance(A__ ,(list, tuple) ) and isinstance(processed_features[0] ,(dict, BatchFeature) ):
A = {
key: [example[key] for example in processed_features] for key in processed_features[0].keys()
}
# The model's main input name, usually `input_values`, has be passed for padding
if self.model_input_names[0] not in processed_features:
raise ValueError(
'You should supply an instance of `transformers.BatchFeature` or list of `transformers.BatchFeature`'
F' to this method that includes {self.model_input_names[0]}, but you provided'
F' {list(processed_features.keys() )}' )
A = processed_features[self.model_input_names[0]]
A = (
return_attention_mask if return_attention_mask is not None else self.return_attention_mask
)
if len(A__ ) == 0:
if return_attention_mask:
A = []
return processed_features
# If we have PyTorch/TF tensors or lists as inputs, we cast them as Numpy arrays
# and rebuild them afterwards if no return_tensors is specified
# Note that we lose the specific device the tensor may be on for PyTorch
A = required_input[0]
if isinstance(A__ ,(list, tuple) ):
# first_element might be an empty list/tuple in some edge cases so we grab the first non empty element.
A = 0
while len(required_input[index] ) == 0:
index += 1
if index < len(A__ ):
A = required_input[index][0]
if return_tensors is None:
if is_tf_tensor(A__ ):
A = 'tf'
elif is_torch_tensor(A__ ):
A = 'pt'
elif isinstance(A__ ,(int, float, list, tuple, np.ndarray) ):
A = 'np'
else:
raise ValueError(
F'type of {first_element} unknown: {type(A__ )}. '
'Should be one of a python, numpy, pytorch or tensorflow object.' )
for key, value in processed_features.items():
if isinstance(value[0] ,(int, float) ):
A = to_numpy(A__ )
else:
A = [to_numpy(A__ ) for v in value]
# Convert padding_strategy in PaddingStrategy
A = self._get_padding_strategies(padding=A__ ,max_length=A__ )
A = processed_features[self.model_input_names[0]]
A = len(A__ )
if not all(len(A__ ) == batch_size for v in processed_features.values() ):
raise ValueError('Some items in the output dictionary have a different batch size than others.' )
A = []
for i in range(A__ ):
A = {k: v[i] for k, v in processed_features.items()}
# truncation
A = self._truncate(
A__ ,max_length=A__ ,pad_to_multiple_of=A__ ,truncation=A__ ,)
truncated_inputs.append(A__ )
if padding_strategy == PaddingStrategy.LONGEST:
# make sure that `max_length` cannot be longer than the longest truncated length
A = max(len(input_slice[self.model_input_names[0]] ) for input_slice in truncated_inputs )
A = PaddingStrategy.MAX_LENGTH
A = {}
for i in range(A__ ):
# padding
A = self._pad(
truncated_inputs[i] ,max_length=A__ ,padding_strategy=A__ ,pad_to_multiple_of=A__ ,return_attention_mask=A__ ,)
for key, value in outputs.items():
if key not in batch_outputs:
A = []
if value.dtype is np.dtype(np.floataa ):
A = value.astype(np.floataa )
batch_outputs[key].append(A__ )
return BatchFeature(A__ ,tensor_type=A__ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : int ,A_ : Union[str, Any] = None ,A_ : str = PaddingStrategy.DO_NOT_PAD ,A_ : Dict = None ,A_ : List[str] = None ,) -> dict:
A = processed_features[self.model_input_names[0]]
if padding_strategy == PaddingStrategy.LONGEST:
A = len(A__ )
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A = padding_strategy != PaddingStrategy.DO_NOT_PAD and len(A__ ) < max_length
if return_attention_mask and "attention_mask" not in processed_features:
A = np.ones(len(A__ ) ,dtype=np.intaa )
if needs_to_be_padded:
A = max_length - len(A__ )
if self.padding_side == "right":
if return_attention_mask:
A = np.pad(
processed_features['attention_mask'] ,(0, difference) )
A = ((0, difference), (0, 0)) if self.feature_size > 1 else (0, difference)
A = np.pad(
A__ ,A__ ,'constant' ,constant_values=self.padding_value )
elif self.padding_side == "left":
if return_attention_mask:
A = np.pad(
processed_features['attention_mask'] ,(difference, 0) )
A = ((difference, 0), (0, 0)) if self.feature_size > 1 else (difference, 0)
A = np.pad(
A__ ,A__ ,'constant' ,constant_values=self.padding_value )
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return processed_features
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Optional[Any] ,A_ : Union[str, Any] = None ,A_ : Tuple = None ,A_ : List[Any] = None ,) -> Union[str, Any]:
if not truncation:
return processed_features
elif truncation and max_length is None:
raise ValueError('When setting ``truncation=True``, make sure that ``max_length`` is defined.' )
A = processed_features[self.model_input_names[0]]
# find `max_length` that fits `pad_to_multiple_of`
if max_length is not None and pad_to_multiple_of is not None and (max_length % pad_to_multiple_of != 0):
A = ((max_length // pad_to_multiple_of) + 1) * pad_to_multiple_of
A = len(A__ ) > max_length
if needs_to_be_truncated:
A = processed_features[self.model_input_names[0]][:max_length]
if "attention_mask" in processed_features:
A = processed_features['attention_mask'][:max_length]
return processed_features
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int]=False ,A_ : Tuple=None ) -> Union[str, Any]:
# Get padding strategy
if padding is not False:
if padding is True:
A = PaddingStrategy.LONGEST # Default to pad to the longest sequence in the batch
elif not isinstance(A__ ,A__ ):
A = PaddingStrategy(A__ )
elif isinstance(A__ ,A__ ):
A = padding
else:
A = PaddingStrategy.DO_NOT_PAD
# Set max length if needed
if max_length is None:
if padding_strategy == PaddingStrategy.MAX_LENGTH:
raise ValueError(
F'When setting ``padding={PaddingStrategy.MAX_LENGTH}``, make sure that max_length is defined' )
# Test if we have a padding value
if padding_strategy != PaddingStrategy.DO_NOT_PAD and (self.padding_value is None):
raise ValueError(
'Asking to pad but the feature_extractor does not have a padding value. Please select a value to use'
' as `padding_value`. For example: `feature_extractor.padding_value = 0.0`.' )
return padding_strategy | 721 |
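# A compact sketch of the right-padding branch of _pad above: extend the
# attention mask with zeros and the features with padding_value out to
# max_length. The feature values are arbitrary and feature_size is 1.
import numpy as np

features = np.array([0.5, -0.2, 0.1])
max_length, padding_value = 5, 0.0
attention_mask = np.ones(len(features), dtype=np.int64)
difference = max_length - len(features)
attention_mask = np.pad(attention_mask, (0, difference))
features = np.pad(features, (0, difference), "constant", constant_values=padding_value)
assert features.tolist() == [0.5, -0.2, 0.1, 0.0, 0.0]
assert attention_mask.tolist() == [1, 1, 1, 0, 0]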
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPFeatureExtractor''']
_lowercase = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 0 |
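# The guarded-import idea above, reduced to its skeleton: record what would be
# importable and add backend-specific names only when their dependency resolves.
# This toy probes numpy instead of torch/tf/flax and omits the _LazyModule
# machinery itself.
import importlib.util

_import_structure_demo = {"configuration": ["Config"]}
if importlib.util.find_spec("numpy") is not None:
    _import_structure_demo["modeling"] = ["Model"]
print(sorted(_import_structure_demo))  # ['configuration', 'modeling'] when numpy is installed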
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
_lowercase = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
_lowercase = {'''facebook/blenderbot_small-90M''': 5_12}
def _snake_case ( snake_case__ : Dict ):
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
    A = set(pairs )
return pairs
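# A concrete trace of the helper above, assuming the file's convention of a word
# as a tuple of symbols with a </w> end-of-word marker appended by the BPE code
# below: the pairs of ("l", "o", "w", "</w>") are (l, o), (o, w) and (w, </w>).
def _pairs_demo():
    word = ("l", "o", "w", "</w>")
    return {(word[i], word[i + 1]) for i in range(len(word) - 1)}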
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[int] = VOCAB_FILES_NAMES
_lowerCamelCase: List[str] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: str = ['input_ids', 'attention_mask']
def __init__( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : Union[str, Any]="__start__" ,A_ : List[Any]="__end__" ,A_ : Optional[Any]="__unk__" ,A_ : Any="__null__" ,**A_ : int ,) -> Any:
super().__init__(unk_token=A_ ,bos_token=A_ ,eos_token=A_ ,pad_token=A_ ,**A_ )
with open(A_ ,encoding='utf-8' ) as vocab_handle:
A = json.load(A_ )
A = {v: k for k, v in self.encoder.items()}
with open(A_ ,encoding='utf-8' ) as merges_handle:
A = merges_handle.read().split('\n' )[1:-1]
A = [tuple(merge.split() ) for merge in merges]
A = dict(zip(A_ ,range(len(A_ ) ) ) )
A = {}
@property
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
return len(self.encoder )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Dict:
return dict(self.encoder ,**self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : str ) -> str:
if token in self.cache:
return self.cache[token]
A = re.sub('([.,!?()])' ,R' \1' ,A_ )
A = re.sub('(\')' ,R' \1 ' ,A_ )
A = re.sub(R'\s{2,}' ,' ' ,A_ )
if "\n" in token:
A = token.replace('\n' ,' __newln__' )
A = token.split(' ' )
A = []
for token in tokens:
if not len(A_ ):
continue
A = token.lower()
A = tuple(A_ )
A = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
A = get_pairs(A_ )
if not pairs:
words.append(A_ )
continue
while True:
A = min(A_ ,key=lambda A_ : self.bpe_ranks.get(A_ ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A , A = bigram
A = []
A = 0
while i < len(A_ ):
try:
A = word.index(A_ ,A_ )
new_word.extend(word[i:j] )
A = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(A_ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(A_ )
A = new_word
if len(A_ ) == 1:
break
else:
A = get_pairs(A_ )
A = '@@ '.join(A_ )
A = word[:-4]
A = word
words.append(A_ )
return " ".join(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : str ) -> List[str]:
A = []
A = re.findall(R'\S+\n?' ,A_ )
for token in words:
split_tokens.extend(list(self.bpe(A_ ).split(' ' ) ) )
return split_tokens
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : str ) -> int:
A = token.lower()
return self.encoder.get(A_ ,self.encoder.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int ) -> str:
return self.decoder.get(A_ ,self.unk_token )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : List[str] ) -> str:
A = ' '.join(A_ ).replace('@@ ' ,'' ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(A_ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=A_ ,ensure_ascii=A_ ) + '\n' )
A = 0
with open(A_ ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda A_ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
A = token_index
writer.write(' '.join(A_ ) + '\n' )
index += 1
return vocab_file, merge_file | 700 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Dict = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase: Optional[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ ) | 22 | 0 |
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .tokenization_wavaveca import WavaVecaCTCTokenizer
class lowerCAmelCase_ ( __snake_case ):
'''simple docstring'''
_lowerCamelCase: int = '''Wav2Vec2FeatureExtractor'''
_lowerCamelCase: str = '''AutoTokenizer'''
def __init__( self : Any ,A_ : Any ,A_ : Optional[Any] ) -> Optional[int]:
super().__init__(__UpperCamelCase ,__UpperCamelCase )
A = self.feature_extractor
A = False
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Union[str, Any] ,A_ : Tuple ,**A_ : Any ) -> str:
try:
return super().from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
except OSError:
warnings.warn(
F'Loading a tokenizer inside {cls.__name__} from a config that does not'
' include a `tokenizer_class` attribute is deprecated and will be '
'removed in v5. Please add `\'tokenizer_class\': \'Wav2Vec2CTCTokenizer\'`'
' attribute to either your `config.json` or `tokenizer_config.json` '
'file to suppress this warning: ' ,__UpperCamelCase ,)
A = WavaVecaFeatureExtractor.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
A = WavaVecaCTCTokenizer.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
return cls(feature_extractor=__UpperCamelCase ,tokenizer=__UpperCamelCase )
def __call__( self : Tuple ,*A_ : List[str] ,**A_ : int ) -> Optional[Any]:
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase ,**__UpperCamelCase )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
A = kwargs.pop('raw_speech' )
else:
A = kwargs.pop('audio' ,__UpperCamelCase )
A = kwargs.pop('sampling_rate' ,__UpperCamelCase )
A = kwargs.pop('text' ,__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
A = args[0]
A = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
A = self.feature_extractor(__UpperCamelCase ,*__UpperCamelCase ,sampling_rate=__UpperCamelCase ,**__UpperCamelCase )
if text is not None:
A = self.tokenizer(__UpperCamelCase ,**__UpperCamelCase )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A = encodings['input_ids']
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,*A_ : Optional[int] ,**A_ : Any ) -> Dict:
if self._in_target_context_manager:
return self.current_processor.pad(*__UpperCamelCase ,**__UpperCamelCase )
A = kwargs.pop('input_features' ,__UpperCamelCase )
A = kwargs.pop('labels' ,__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
A = args[0]
A = args[1:]
if input_features is not None:
A = self.feature_extractor.pad(__UpperCamelCase ,*__UpperCamelCase ,**__UpperCamelCase )
if labels is not None:
A = self.tokenizer.pad(__UpperCamelCase ,**__UpperCamelCase )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A = labels['input_ids']
return input_features
def _SCREAMING_SNAKE_CASE ( self : Tuple ,*A_ : Any ,**A_ : Optional[Any] ) -> Optional[Any]:
return self.tokenizer.batch_decode(*__UpperCamelCase ,**__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,*A_ : List[Any] ,**A_ : List[Any] ) -> Tuple:
return self.tokenizer.decode(*__UpperCamelCase ,**__UpperCamelCase )
@contextmanager
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
A = True
A = self.tokenizer
yield
A = self.feature_extractor
A = False | 701 |
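# The dispatch rule of __call__ above, reduced to a runnable toy: audio-only
# returns features, text-only returns encodings, and giving both attaches the
# text ids as "labels". The stub tokenizer just maps characters to code points.
def _process_demo(audio=None, text=None):
    if audio is None and text is None:
        raise ValueError("You need to specify either an `audio` or `text` input to process.")
    inputs = {"input_values": audio} if audio is not None else None
    encodings = {"input_ids": [ord(c) for c in text]} if text is not None else None
    if text is None:
        return inputs
    if audio is None:
        return encodings
    inputs["labels"] = encodings["input_ids"]
    return inputs

assert _process_demo(audio=[0.1, 0.2], text="hi") == {"input_values": [0.1, 0.2], "labels": [104, 105]}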
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowercase = 8
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ):
A = x.device
A = (x * 255).int().clamp(0 , 255 )
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' )
A = ((x & mask) != 0).float()
A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' )
A = bits * 2 - 1
return bits
def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ):
A = x.device
A = (x > 0).int()
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 )
A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
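# A self-contained round-trip of the bit-plane codec the two helpers above
# implement, rewritten with plain torch ops (no einops). Shapes and values are
# arbitrary; it checks that encode followed by decode equals 8-bit quantization.
def _bits_roundtrip_demo():
    x = torch.rand(2, 3, 4, 4)
    q = (x * 255).int().clamp(0, 255)  # 8-bit quantize, as in decimal_to_bits
    mask = 2 ** torch.arange(7, -1, -1)  # [128, 64, ..., 1]
    bits = ((q.unsqueeze(2) & mask.view(1, 1, 8, 1, 1)) != 0).float() * 2 - 1
    # bits: (b, c, 8, h, w); the helper above flattens dims 1-2 into c*8 channels
    decoded = (((bits > 0).int() * mask.view(1, 1, 8, 1, 1)).sum(2) / 255).clamp(0, 1)
    assert torch.allclose(decoded, q / 255)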
def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read DDIM paper in-detail understanding
# Notation (<variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
A = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
A = self.alphas_cumprod[timestep]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
A = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
A = self._get_variance(snake_case__ , snake_case__ )
A = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu'
A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ )
A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise
A = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
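# A numeric check of the "predicted x_0" identity used in step 3 above: if
# sample = sqrt(a_t) * x0 + sqrt(1 - a_t) * eps, then
# (sample - sqrt(1 - a_t) * eps) / sqrt(a_t) recovers x0 up to rounding.
def _pred_x0_demo():
    alpha_prod_t = torch.tensor(0.7)
    x0, eps = torch.randn(4), torch.randn(4)
    sample = alpha_prod_t**0.5 * x0 + (1 - alpha_prod_t) ** 0.5 * eps
    pred_x0 = (sample - (1 - alpha_prod_t) ** 0.5 * eps) / alpha_prod_t**0.5
    assert torch.allclose(pred_x0, x0, atol=1e-5)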
def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 )
else:
A = None
# 1. compute alphas, betas
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[t - 1] if t > 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
A = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
# 6. Add noise
A = 0
if t > 0:
A = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device )
A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
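# A sanity check on the posterior-mean coefficients from steps 4-5 above: since
# alpha_bar_t = alpha_t * alpha_bar_prev and beta_t = 1 - alpha_t, feeding the
# noiseless sample x_t = sqrt(alpha_bar_t) * x0 into mu = c0 * x0 + ct * x_t
# must return sqrt(alpha_bar_prev) * x0. The schedule values are arbitrary.
def _posterior_coeff_demo():
    alpha_t, alpha_bar_prev = 0.95, 0.80
    alpha_bar_t = alpha_t * alpha_bar_prev
    beta_t = 1 - alpha_t
    c0 = (alpha_bar_prev**0.5 * beta_t) / (1 - alpha_bar_t)
    ct = alpha_t**0.5 * (1 - alpha_bar_prev) / (1 - alpha_bar_t)
    assert abs(c0 + ct * alpha_bar_t**0.5 - alpha_bar_prev**0.5) < 1e-9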
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]:
super().__init__()
A = bit_scale
A = (
ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A_ ,scheduler=A_ )
@torch.no_grad()
def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]:
A = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,)
A = decimal_to_bits(A_ ) * self.bit_scale
A = latents.to(self.device )
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
A = self.unet(A_ ,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample
A = bits_to_decimal(A_ )
if output_type == "pil":
A = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ ) | 22 | 0 |
"""simple docstring"""
from collections import deque
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : int ,A_ : Tuple ,A_ : Tuple ) -> None:
A = process_name # process name
A = arrival_time # arrival time of the process
# completion time of finished process or last interrupted time
A = arrival_time
A = burst_time # remaining burst time
A = 0 # total time of the process wait in ready queue
A = 0 # time from arrival time to completion time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Tuple ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[Any] ,) -> None:
A = number_of_queues
# time slice of queues that round robin algorithm applied
A = time_slices
# unfinished process is in this ready_queue
A = queue
# current time
A = current_time
# finished process is in this sequence queue
A = deque()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> list[str]:
A = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> list[int]:
A = []
for i in range(len(lowercase__ ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Any ) -> list[int]:
A = []
for i in range(len(lowercase__ ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> list[int]:
A = []
for i in range(len(lowercase__ ) ):
completion_times.append(queue[i].stop_time )
return completion_times
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : str ) -> list[int]:
return [q.burst_time for q in queue]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Any ) -> int:
process.waiting_time += self.current_time - process.stop_time
return process.waiting_time
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[int] ) -> deque[Process]:
A = deque() # sequence deque of finished process
while len(lowercase__ ) != 0:
A = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of current process
self.update_waiting_time(lowercase__ )
# update current time
self.current_time += cp.burst_time
# finish the process and set the process's burst-time 0
A = 0
# set the process's turnaround time because it is finished
A = self.current_time - cp.arrival_time
# set the completion time
A = self.current_time
# add the process to queue that has finished queue
finished.append(lowercase__ )
self.finish_queue.extend(lowercase__ ) # add finished process to finish queue
# FCFS will finish all remaining processes
return finished
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Any ,A_ : List[Any] ) -> tuple[deque[Process], deque[Process]]:
A = deque() # sequence deque of terminated process
# just for 1 cycle and unfinished processes will go back to queue
for _ in range(len(lowercase__ ) ):
A = ready_queue.popleft() # current process
# if process's arrival time is later than current time, update current time
if self.current_time < cp.arrival_time:
self.current_time += cp.arrival_time
# update waiting time of unfinished processes
self.update_waiting_time(lowercase__ )
# if the burst time of process is bigger than time-slice
if cp.burst_time > time_slice:
# use CPU for only time-slice
self.current_time += time_slice
# update remaining burst time
cp.burst_time -= time_slice
# update end point time
A = self.current_time
# locate the process behind the queue because it is not finished
ready_queue.append(lowercase__ )
else:
# use CPU for remaining burst time
self.current_time += cp.burst_time
# set burst time 0 because the process is finished
A = 0
# set the finish time
A = self.current_time
# update the process' turnaround time because it is finished
A = self.current_time - cp.arrival_time
# add the process to queue that has finished queue
finished.append(lowercase__ )
self.finish_queue.extend(lowercase__ ) # add finished process to finish queue
# return finished processes queue and remaining processes queue
return finished, ready_queue
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> deque[Process]:
for i in range(self.number_of_queues - 1 ):
A = self.round_robin(
self.ready_queue ,self.time_slices[i] )
# the last queue has first_come_first_served algorithm
self.first_come_first_served(self.ready_queue )
return self.finish_queue
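# A minimal trace of the scheduler above, using the Process/MLFQ names the demo
# below uses: one round-robin level with time slice 4, then FCFS. A single
# process with burst 10 runs 4 units in the round-robin level and the remaining
# 6 in FCFS, so it stops (and turns around) at t = 10.
def _mlfq_trace_demo():
    p = Process("demo", 0, 10)
    scheduler = MLFQ(2, [4], deque([p]), 0)
    scheduler.multi_level_feedback_queue()
    return p.stop_time, p.turnaround_time  # expected: (10, 10)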
if __name__ == "__main__":
    import doctest

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    if len(time_slices) != number_of_queues - 1:
        raise SystemExit(0)
    doctest.testmod(extraglobs={"queue": deque([P1, P2, P3, P4])})

    P1 = Process("P1", 0, 53)
    P2 = Process("P2", 0, 17)
    P3 = Process("P3", 0, 68)
    P4 = Process("P4", 0, 24)
    number_of_queues = 3
    time_slices = [17, 25]
    queue = deque([P1, P2, P3, P4])
    mlfq = MLFQ(number_of_queues, time_slices, queue, 0)
    finish_queue = mlfq.multi_level_feedback_queue()

    # print total waiting times of processes (P1, P2, P3, P4)
    print(f"waiting time:\t\t\t{MLFQ.calculate_waiting_time(mlfq, [P1, P2, P3, P4])}")
    # print completion times of processes (P1, P2, P3, P4)
    print(f"completion time:\t\t{MLFQ.calculate_completion_time(mlfq, [P1, P2, P3, P4])}")
    # print total turnaround times of processes (P1, P2, P3, P4)
    print(f"turnaround time:\t\t{MLFQ.calculate_turnaround_time(mlfq, [P1, P2, P3, P4])}")
    # print sequence of finished processes
    print(f"sequence of finished processes:{mlfq.calculate_sequence_of_finish_queue()}")
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"
    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient
class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")
@property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
    def atol_for_validation(self) -> float:
return 1e-4
@property
    def default_onnx_opset(self) -> int:
return 12 | 22 | 0 |
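# Illustrative check (a sketch, not part of the original file):
#   onnx_config = YolosOnnxConfig(YolosConfig())
#   onnx_config.inputs               # OrderedDict with one dynamic "pixel_values" input
#   onnx_config.atol_for_validation  # 1e-4
#   onnx_config.default_onnx_opset   # 12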
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class MarkovChainGraphUndirectedUnweighted:
    def __init__(self):
        self.connections = {}

    def add_node(self, node: str) -> None:
        self.connections[node] = {}

    def add_transition_probability(self, node1: str, node2: str, probability: float) -> None:
        if node1 not in self.connections:
            self.add_node(node1)
        if node2 not in self.connections:
            self.add_node(node2)
        self.connections[node1][node2] = probability

    def get_nodes(self) -> list[str]:
        return list(self.connections)

    def transition(self, node: str) -> str:
        current_probability = 0
        random_value = random()
        for dest in self.connections[node]:
            current_probability += self.connections[node][dest]
            if current_probability > random_value:
                return dest
        return ""


def get_transitions(start: str, transitions: list[tuple[str, str, float]], steps: int) -> dict[str, int]:
    graph = MarkovChainGraphUndirectedUnweighted()
    for node1, node2, probability in transitions:
        graph.add_transition_probability(node1, node2, probability)
    visited = Counter(graph.get_nodes())
    node = start
    for _ in range(steps):
        node = graph.transition(node)
        visited[node] += 1
    return visited
if __name__ == "__main__":
import doctest
doctest.testmod()
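    # Illustrative run (a sketch; node names are hypothetical): random walk over
    # a two-state chain, counting visits per node.
    example_transitions = [("a", "a", 0.9), ("a", "b", 0.1), ("b", "a", 0.5), ("b", "b", 0.5)]
    print(get_transitions("a", example_transitions, 1000))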
| 703 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class OnnxStableDiffusionImgaImgPipelineFastTests(OnnxPipelineTesterMixin, unittest.TestCase):
    hub_checkpoint = "hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline"
    def get_dummy_inputs(self, seed=0):
        image = floats_tensor((1, 3, 128, 128), rng=random.Random(seed))
        generator = np.random.RandomState(seed)
        inputs = {
            "prompt": "A painting of a squirrel eating a burger",
            "image": image,
            "generator": generator,
            "num_inference_steps": 3,
            "strength": 0.75,
            "guidance_scale": 7.5,
            "output_type": "numpy",
        }
        return inputs
    def test_pipeline_default_ddim(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1].flatten()
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.69643, 0.58484, 0.50314, 0.58760, 0.55368, 0.59643, 0.51529, 0.41217, 0.49087])
        assert np.abs(image_slice - expected_slice).max() < 1e-1

    def test_pipeline_pndm(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = PNDMScheduler.from_config(pipe.scheduler.config, skip_prk_steps=True)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.61737, 0.54642, 0.53183, 0.54465, 0.52742, 0.60525, 0.49969, 0.40655, 0.48154])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_lms(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        # warmup pass to apply optimizations
        _ = pipe(**self.get_dummy_inputs())
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52761, 0.59977, 0.49033, 0.49619, 0.54282, 0.50311, 0.47600, 0.40918, 0.45203])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_euler_ancestral(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.52911, 0.60004, 0.49229, 0.49805, 0.54502, 0.50680, 0.47777, 0.41028, 0.45304])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

    def test_pipeline_dpm_multistep(self):
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint, provider="CPUExecutionProvider")
        pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
        pipe.set_progress_bar_config(disable=None)
        inputs = self.get_dummy_inputs()
        image = pipe(**inputs).images
        image_slice = image[0, -3:, -3:, -1]
        assert image.shape == (1, 128, 128, 3)
        expected_slice = np.array([0.65331, 0.58277, 0.48204, 0.56059, 0.53665, 0.56235, 0.50969, 0.40009, 0.46552])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
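# The fast tests above compare only a 3x3 corner slice of the output against
# reference values, with a loose 1e-1 tolerance; with tiny random weights this
# keeps the checks robust to small numerical differences across onnxruntime builds.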
@nightly
@require_onnxruntime
@require_torch_gpu
class OnnxStableDiffusionImgaImgPipelineIntegrationTests(unittest.TestCase):
@property
    def gpu_provider(self):
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
    def gpu_options(self):
        options = ort.SessionOptions()
        options.enable_mem_pattern = False
        return options
    def test_inference_default_pndm(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        # using the PNDM scheduler by default
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "CompVis/stable-diffusion-v1-4",
            revision="onnx",
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=10,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.4909, 0.5059, 0.5372, 0.4623, 0.4876, 0.5049, 0.4820, 0.4956, 0.5019])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
    def test_inference_k_lms(self):
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/img2img/sketch-mountains-input.jpg"
        )
        init_image = init_image.resize((768, 512))
        lms_scheduler = LMSDiscreteScheduler.from_pretrained(
            "runwayml/stable-diffusion-v1-5", subfolder="scheduler", revision="onnx"
        )
        pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
            "runwayml/stable-diffusion-v1-5",
            revision="onnx",
            scheduler=lms_scheduler,
            safety_checker=None,
            feature_extractor=None,
            provider=self.gpu_provider,
            sess_options=self.gpu_options,
        )
        pipe.set_progress_bar_config(disable=None)
        prompt = "A fantasy landscape, trending on artstation"
        generator = np.random.RandomState(0)
        output = pipe(
            prompt=prompt,
            image=init_image,
            strength=0.75,
            guidance_scale=7.5,
            num_inference_steps=20,
            generator=generator,
            output_type="np",
        )
        images = output.images
        image_slice = images[0, 255:258, 383:386, -1]
        assert images.shape == (1, 512, 768, 3)
        expected_slice = np.array([0.8043, 0.926, 0.9581, 0.8119, 0.8954, 0.913, 0.7209, 0.7463, 0.7431])
        # TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice).max() < 2e-2
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
_lowercase = None
logger = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {"vocab_file": "spiece.model", "tokenizer_file": "tokenizer.json"}
PRETRAINED_VOCAB_FILES_MAP = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'''albert-base-v1''': 5_12,
'''albert-large-v1''': 5_12,
'''albert-xlarge-v1''': 5_12,
'''albert-xxlarge-v1''': 5_12,
'''albert-base-v2''': 5_12,
'''albert-large-v2''': 5_12,
'''albert-xlarge-v2''': 5_12,
'''albert-xxlarge-v2''': 5_12,
}
SPIECE_UNDERLINE = "▁"
class AlbertTokenizerFast(PreTrainedTokenizerFast):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    slow_tokenizer_class = AlbertTokenizer

    def __init__(
        self,
        vocab_file=None,
        tokenizer_file=None,
        do_lower_case=True,
        remove_space=True,
        keep_accents=False,
        bos_token="[CLS]",
        eos_token="[SEP]",
        unk_token="<unk>",
        sep_token="[SEP]",
        pad_token="<pad>",
        cls_token="[CLS]",
        mask_token="[MASK]",
        **kwargs,
    ):
        # Mask token behave like a normal word, i.e. include the space before it and
        # is included in the raw text, there should be a match in a non-normalized sentence.
        mask_token = (
            AddedToken(mask_token, lstrip=True, rstrip=False, normalized=False)
            if isinstance(mask_token, str)
            else mask_token
        )
        super().__init__(
            vocab_file,
            tokenizer_file=tokenizer_file,
            do_lower_case=do_lower_case,
            remove_space=remove_space,
            keep_accents=keep_accents,
            bos_token=bos_token,
            eos_token=eos_token,
            unk_token=unk_token,
            sep_token=sep_token,
            pad_token=pad_token,
            cls_token=cls_token,
            mask_token=mask_token,
            **kwargs,
        )
        self.do_lower_case = do_lower_case
        self.remove_space = remove_space
        self.keep_accents = keep_accents
        self.vocab_file = vocab_file
        self.can_save_slow_tokenizer = False if not self.vocab_file else True

    def build_inputs_with_special_tokens(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # single sequence: [CLS] X [SEP]; pair of sequences: [CLS] A [SEP] B [SEP]
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return cls + token_ids_0 + sep
        return cls + token_ids_0 + sep + token_ids_1 + sep

    def create_token_type_ids_from_sequences(
        self, token_ids_0: List[int], token_ids_1: Optional[List[int]] = None
    ) -> List[int]:
        # token_type_ids are 0 for the first segment and 1 for the second
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep) * [0]
        return len(cls + token_ids_0 + sep) * [0] + len(token_ids_1 + sep) * [1]

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not self.can_save_slow_tokenizer:
            raise ValueError(
                "Your fast tokenizer does not have the necessary information to save the vocabulary for a slow "
                "tokenizer."
            )
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        out_vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        if os.path.abspath(self.vocab_file) != os.path.abspath(out_vocab_file):
            copyfile(self.vocab_file, out_vocab_file)
        return (out_vocab_file,)
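# Round-trip sketch (illustrative; checkpoint name taken from the maps above):
#   tokenizer = AlbertTokenizerFast.from_pretrained("albert-base-v2")
#   ids = tokenizer("hello world").input_ids  # ids with [CLS]/[SEP] added
#   tokenizer.decode(ids)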
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_(BaseImageProcessor):
    model_input_names = ["pixel_values"]
    def __init__(
        self,
        do_resize: bool = True,
        size: Optional[Dict[str, int]] = None,
        resample: PILImageResampling = PILImageResampling.BILINEAR,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 256}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: float,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: Optional[bool] = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: Dict[str, int] = None,
        do_rescale: Optional[bool] = None,
        rescale_factor: Optional[float] = None,
        do_normalize: Optional[bool] = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Union[str, ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ) -> BatchFeature:
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size")
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        images = make_list_of_images(images)
        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]
        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]
        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
    def post_process_semantic_segmentation(self, outputs, target_sizes: List[Tuple] = None):
        logits = outputs.logits
        # Resize logits and compute semantic segmentation maps
        if target_sizes is not None:
            if len(logits) != len(target_sizes):
                raise ValueError(
                    "Make sure that you pass in as many target sizes as the batch dimension of the logits"
                )
            if is_torch_tensor(target_sizes):
                target_sizes = target_sizes.numpy()
            semantic_segmentation = []
            for idx in range(len(logits)):
                resized_logits = torch.nn.functional.interpolate(
                    logits[idx].unsqueeze(dim=0), size=target_sizes[idx], mode="bilinear", align_corners=False
                )
                semantic_map = resized_logits[0].argmax(dim=0)
                semantic_segmentation.append(semantic_map)
        else:
            semantic_segmentation = logits.argmax(dim=1)
            semantic_segmentation = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0])]
        return semantic_segmentation
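# Usage sketch (illustrative; the model pairing is an assumption):
#   processor = lowerCAmelCase_()                       # image processor defined above
#   batch = processor(images=[pil_image], return_tensors="pt")
#   batch["pixel_values"].shape                         # (1, 3, 224, 224) after center crop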
"""simple docstring"""
import os
import sys
import transformers
os.environ["TF_CPP_MIN_LOG_LEVEL"] = """3"""  # reduce the amount of console output from TF
print("""Python version:""", sys.version)
print("""transformers version:""", transformers.__version__)
try:
import torch
print("""Torch version:""", torch.__version__)
print("""Cuda available:""", torch.cuda.is_available())
print("""Cuda version:""", torch.version.cuda)
print("""CuDNN version:""", torch.backends.cudnn.version())
print("""Number of GPUs available:""", torch.cuda.device_count())
print("""NCCL version:""", torch.cuda.nccl.version())
except ImportError:
print("""Torch version:""", None)
try:
import deepspeed
print("""DeepSpeed version:""", deepspeed.__version__)
except ImportError:
print("""DeepSpeed version:""", None)
try:
import tensorflow as tf
print("""TensorFlow version:""", tf.__version__)
print("""TF GPUs available:""", bool(tf.config.list_physical_devices("""GPU""")))
print("""Number of TF GPUs available:""", len(tf.config.list_physical_devices("""GPU""")))
except ImportError:
print("""TensorFlow version:""", None) | 705 |
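# Typical use (illustrative; the script name is an assumption): run this
# diagnostics file directly, e.g. `python print_env.py`, before filing a bug report.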
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
data_utils.Vocab = data_utils.TransfoXLTokenizer
data_utils.Corpus = data_utils.TransfoXLCorpus
sys.modules["data_utils"] = data_utils
sys.modules["vocabulary"] = data_utils
def convert_transfo_xl_checkpoint_to_pytorch(
    tf_checkpoint_path, transfo_xl_config_file, pytorch_dump_folder_path, transfo_xl_dataset_file
):
    if transfo_xl_dataset_file:
        # Convert a pre-processed corpus (see original TensorFlow repo)
        with open(transfo_xl_dataset_file, "rb") as fp:
            corpus = pickle.load(fp, encoding="latin1")
        # Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
        pytorch_vocab_dump_path = pytorch_dump_folder_path + "/" + VOCAB_FILES_NAMES["pretrained_vocab_file"]
        print(f"Save vocabulary to {pytorch_vocab_dump_path}")
        corpus_vocab_dict = corpus.vocab.__dict__
        torch.save(corpus_vocab_dict, pytorch_vocab_dump_path)
        corpus_dict_no_vocab = corpus.__dict__
        corpus_dict_no_vocab.pop("vocab", None)
        pytorch_dataset_dump_path = pytorch_dump_folder_path + "/" + CORPUS_NAME
        print(f"Save dataset to {pytorch_dataset_dump_path}")
        torch.save(corpus_dict_no_vocab, pytorch_dataset_dump_path)

    if tf_checkpoint_path:
        # Convert a pre-trained TensorFlow model
        tf_path = os.path.abspath(tf_checkpoint_path)
        config_path = os.path.abspath(transfo_xl_config_file)
        print(f"Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.")
        # Initialise PyTorch model
        if transfo_xl_config_file == "":
            config = TransfoXLConfig()
        else:
            config = TransfoXLConfig.from_json_file(transfo_xl_config_file)
        print(f"Building PyTorch model from configuration: {config}")
        model = TransfoXLLMHeadModel(config)
        model = load_tf_weights_in_transfo_xl(model, config, tf_path)
        # Save pytorch-model
        pytorch_weights_dump_path = os.path.join(pytorch_dump_folder_path, WEIGHTS_NAME)
        pytorch_config_dump_path = os.path.join(pytorch_dump_folder_path, CONFIG_NAME)
        print(f"Save PyTorch model to {os.path.abspath(pytorch_weights_dump_path)}")
        torch.save(model.state_dict(), pytorch_weights_dump_path)
        print(f"Save configuration file to {os.path.abspath(pytorch_config_dump_path)}")
        with open(pytorch_config_dump_path, "w", encoding="utf-8") as f:
            f.write(config.to_json_string())
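# Example invocation (a sketch; the script filename and paths are assumptions):
#   python convert_transfo_xl_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path model.ckpt \
#       --transfo_xl_config_file config.json \
#       --pytorch_dump_folder_path ./out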
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 0 |
"""simple docstring"""
import json
import os
import unittest
from transformers.models.roc_bert.tokenization_roc_bert import (
VOCAB_FILES_NAMES,
RoCBertBasicTokenizer,
RoCBertTokenizer,
RoCBertWordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class RoCBertTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoCBertTokenizer
    rust_tokenizer_class = None
    test_rust_tokenizer = False
    space_between_special_tokens = True
    from_pretrained_filter = filter_non_english
    def setUp(self):
        super().setUp()
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "[PAD]", "[MASK]", "你", "好", "是", "谁", "a", "b", "c", "d"]
        word_shape = {}
        word_pronunciation = {}
        for i, value in enumerate(vocab_tokens):
            word_shape[value] = i
            word_pronunciation[value] = i
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
        self.word_shape_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_shape_file"])
        self.word_pronunciation_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["word_pronunciation_file"])
        with open(self.vocab_file, "w", encoding="utf-8") as vocab_writer:
            vocab_writer.write("".join([x + "\n" for x in vocab_tokens]))
        with open(self.word_shape_file, "w", encoding="utf-8") as word_shape_writer:
            json.dump(word_shape, word_shape_writer, ensure_ascii=False)
        with open(self.word_pronunciation_file, "w", encoding="utf-8") as word_pronunciation_writer:
            json.dump(word_pronunciation, word_pronunciation_writer, ensure_ascii=False)
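    # In this toy setup the shape and pronunciation vocabularies mirror the token
    # vocabulary one-to-one, which is why the three id sequences asserted in the
    # test below are identical.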
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        tokens = tokenizer.tokenize("你好[SEP]你是谁")
        self.assertListEqual(tokens, ["你", "好", "[SEP]", "你", "是", "谁"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_shape_ids(tokens), [5, 6, 2, 5, 7, 8])
        self.assertListEqual(tokenizer.convert_tokens_to_pronunciation_ids(tokens), [5, 6, 2, 5, 7, 8])
    def test_chinese(self):
        tokenizer = RoCBertBasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = RoCBertBasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = RoCBertWordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    def test_clean_text(self):
        tokenizer = self.get_tokenizer()
        # Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
        self.assertListEqual(
            [tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
        if self.test_rust_tokenizer:
            rust_tokenizer = self.get_rust_tokenizer()
            self.assertListEqual(
                [rust_tokenizer.tokenize(t) for t in ["Test", "\xad", "test"]], [["[UNK]"], [], ["[UNK]"]])
    def test_offsets_with_special_characters(self):
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                sentence = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
                tokens = tokenizer_r.encode_plus(
                    sentence,
                    return_attention_mask=False,
                    return_token_type_ids=False,
                    return_offsets_mapping=True,
                    add_special_tokens=True,
                )
                do_lower_case = tokenizer_r.do_lower_case if hasattr(tokenizer_r, "do_lower_case") else False
                expected_results = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), "A"),
((1, 2), ","),
((3, 5), "na"),
((5, 6), "##ï"),
((6, 8), "##ve"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "Allen"),
((21, 23), "##NL"),
((23, 24), "##P"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), "a"),
((1, 2), ","),
((3, 8), "naive"),
((9, 15), tokenizer_r.mask_token),
((16, 21), "allen"),
((21, 23), "##nl"),
((23, 24), "##p"),
((25, 33), "sentence"),
((33, 34), "."),
((0, 0), tokenizer_r.sep_token),
]
)
                self.assertEqual(
                    [e[1] for e in expected_results], tokenizer_r.convert_ids_to_tokens(tokens["input_ids"])
                )
                self.assertEqual([e[0] for e in expected_results], tokens["offset_mapping"])
    def test_change_tokenize_chinese_chars(self):
        list_of_commun_chinese_char = ["的", "人", "有"]
        text_with_chinese_char = "".join(list_of_commun_chinese_char)
        for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
            with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})'):
                kwargs["tokenize_chinese_chars"] = True
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that each Chinese character is not preceded by "##"
                self.assertListEqual(tokens_without_spe_char_p, list_of_commun_chinese_char)
                self.assertListEqual(tokens_without_spe_char_r, list_of_commun_chinese_char)

                kwargs["tokenize_chinese_chars"] = False
                tokenizer_r = self.rust_tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                tokenizer_p = self.tokenizer_class.from_pretrained(pretrained_name, **kwargs)
                ids_without_spe_char_r = tokenizer_r.encode(text_with_chinese_char, add_special_tokens=False)
                ids_without_spe_char_p = tokenizer_p.encode(text_with_chinese_char, add_special_tokens=False)
                tokens_without_spe_char_r = tokenizer_r.convert_ids_to_tokens(ids_without_spe_char_r)
                tokens_without_spe_char_p = tokenizer_p.convert_ids_to_tokens(ids_without_spe_char_p)
                # it is expected that only the first Chinese character is not preceded by "##".
                expected_tokens = [
                    F'##{token}' if idx != 0 else token for idx, token in enumerate(list_of_commun_chinese_char)
                ]
                self.assertListEqual(tokens_without_spe_char_p, expected_tokens)
                self.assertListEqual(tokens_without_spe_char_r, expected_tokens)
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class(self.vocab_file, self.word_shape_file, self.word_pronunciation_file)
        text = tokenizer.encode("你好", add_special_tokens=False)
        text_a = tokenizer.encode("你是谁", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == [1] + text + [2]
        assert encoded_pair == [1] + text + [2] + text_a + [2]
    def test_prepare_for_model(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                string_sequence = "你好,你是谁"
                tokens = tokenizer.tokenize(string_sequence)
                tokens_ids = tokenizer.convert_tokens_to_ids(tokens)
                tokens_shape_ids = tokenizer.convert_tokens_to_shape_ids(tokens)
                tokens_proun_ids = tokenizer.convert_tokens_to_pronunciation_ids(tokens)
                prepared_input_dict = tokenizer.prepare_for_model(
                    tokens_ids, tokens_shape_ids, tokens_proun_ids, add_special_tokens=True)
                input_dict = tokenizer.encode_plus(string_sequence, add_special_tokens=True)
                self.assertEqual(input_dict, prepared_input_dict)
| 706 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class DirectedGraph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        if self.graph.get(u):
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            self.graph[u] = [[w, v]]
        if not self.graph.get(v):
            self.graph[v] = []

    def all_nodes(self):
        return list(self.graph)

    # handles if the input does not exist
    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)

    # if no destination is meant the default value is -1
    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def in_degree(self, u):
        count = 0
        for x in self.graph:
            for y in self.graph[x]:
                if y[1] == u:
                    count += 1
        return count

    def out_degree(self, u):
        return len(self.graph[u])

    def topological_sort(self, s=-2):
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        sorted_nodes = []
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                sorted_nodes.append(stack.pop())
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return sorted_nodes

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
class Graph:
    def __init__(self):
        self.graph = {}

    # adding vertices and edges; adding the weight is optional; handles repetition
    def add_pair(self, u, v, w=1):
        # check if the u exists
        if self.graph.get(u):
            # if there already is a edge
            if self.graph[u].count([w, v]) == 0:
                self.graph[u].append([w, v])
        else:
            # if u does not exist
            self.graph[u] = [[w, v]]
        # add the other way
        if self.graph.get(v):
            # if there already is a edge
            if self.graph[v].count([w, u]) == 0:
                self.graph[v].append([w, u])
        else:
            # if v does not exist
            self.graph[v] = [[w, u]]

    def remove_pair(self, u, v):
        if self.graph.get(u):
            for _ in self.graph[u]:
                if _[1] == v:
                    self.graph[u].remove(_)
        # the other way round
        if self.graph.get(v):
            for _ in self.graph[v]:
                if _[1] == u:
                    self.graph[v].remove(_)

    def dfs(self, s=-2, d=-1):
        if s == d:
            return []
        stack = []
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        ss = s
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        if node[1] == d:
                            visited.append(d)
                            return visited
                        else:
                            stack.append(node[1])
                            visited.append(node[1])
                            ss = node[1]
                            break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return visited

    def fill_graph_randomly(self, c=-1):
        if c == -1:
            c = floor(random() * 10000) + 10
        for i in range(c):
            # every vertex has max 100 edges
            for _ in range(floor(random() * 102) + 1):
                n = floor(random() * c) + 1
                if n != i:
                    self.add_pair(i, n, 1)

    def bfs(self, s=-2):
        d = deque()
        visited = []
        if s == -2:
            s = list(self.graph)[0]
        d.append(s)
        visited.append(s)
        while d:
            s = d.popleft()
            if len(self.graph[s]) != 0:
                for node in self.graph[s]:
                    if visited.count(node[1]) < 1:
                        d.append(node[1])
                        visited.append(node[1])
        return visited

    def degree(self, u):
        return len(self.graph[u])

    def cycle_nodes(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack = len(stack) - 1
                        while len_stack >= 0:
                            if stack[len_stack] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                anticipating_nodes.add(stack[len_stack])
                                len_stack -= 1
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return list(anticipating_nodes)

    def has_cycle(self):
        stack = []
        visited = []
        s = list(self.graph)[0]
        stack.append(s)
        visited.append(s)
        parent = -2
        indirect_parents = []
        ss = s
        on_the_way_back = False
        anticipating_nodes = set()
        while True:
            # check if there is any non isolated nodes
            if len(self.graph[s]) != 0:
                ss = s
                for node in self.graph[s]:
                    if (
                        visited.count(node[1]) > 0
                        and node[1] != parent
                        and indirect_parents.count(node[1]) > 0
                        and not on_the_way_back
                    ):
                        len_stack_minus_one = len(stack) - 1
                        while len_stack_minus_one >= 0:
                            if stack[len_stack_minus_one] == node[1]:
                                anticipating_nodes.add(node[1])
                                break
                            else:
                                return True
                    if visited.count(node[1]) < 1:
                        stack.append(node[1])
                        visited.append(node[1])
                        ss = node[1]
                        break
            # check if all the children are visited
            if s == ss:
                stack.pop()
                on_the_way_back = True
                if len(stack) != 0:
                    s = stack[len(stack) - 1]
            else:
                on_the_way_back = False
                indirect_parents.append(parent)
                parent = s
                s = ss
            # check if we have reached the starting point
            if len(stack) == 0:
                return False

    def all_nodes(self):
        return list(self.graph)

    def dfs_time(self, s=-2, e=-1):
        begin = time()
        self.dfs(s, e)
        end = time()
        return end - begin

    def bfs_time(self, s=-2):
        begin = time()
        self.bfs(s)
        end = time()
        return end - begin
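if __name__ == "__main__":
    # Quick demo (illustrative, not part of the original module): exercise the
    # undirected Graph defined above on a tiny path graph.
    g = Graph()
    g.add_pair(0, 1)
    g.add_pair(1, 2)
    print(g.dfs(0, 2))  # [0, 1, 2]
    print(g.bfs(0))     # [0, 1, 2]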
"""simple docstring"""
import argparse
CUSTOM_JS_FILE = "docs/source/_static/js/custom.js"
def update_custom_js(version: str):
    with open(CUSTOM_JS_FILE, encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    index = 0
    # First let's put the right version
    while not lines[index].startswith("const stableVersion ="):
        index += 1
    lines[index] = f'const stableVersion = "v{version}"\n'
    # Then update the dictionary
    while not lines[index].startswith("const versionMapping = {"):
        index += 1
    # We go until the end
    while not lines[index].startswith("}"):
        index += 1
    # We add the new version at the end
    lines[index - 1] += f' "v{version}": "v{version}",\n'
    with open(CUSTOM_JS_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
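# Example (illustrative; the script path is an assumption):
#   python utils/update_custom_js.py --version 4.28.0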
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--version''', help='''Release version.''')
_lowercase = parser.parse_args()
update_custom_js(args.version) | 707 |
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def get_openlibrary_data(olid: str = "isbn/0140328726") -> dict:
    new_olid = olid.strip().strip("/")  # Remove leading/trailing whitespace & slashes
    if new_olid.count("/") != 1:
        msg = f"{olid} is not a valid Open Library olid"
        raise ValueError(msg)
    return requests.get(f"https://openlibrary.org/{new_olid}.json").json()


def summarize_book(ol_book_data: dict) -> dict:
    desired_keys = {
        "title": "Title",
        "publish_date": "Publish date",
        "authors": "Authors",
        "number_of_pages": "Number of pages:",
        "first_sentence": "First sentence",
        "isbn_10": "ISBN (10)",
        "isbn_13": "ISBN (13)",
    }
    data = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
    data["Authors"] = [
        get_openlibrary_data(author["key"])["name"] for author in data["Authors"]
    ]
    data["First sentence"] = data["First sentence"]["value"]
    for key, value in data.items():
        if isinstance(value, list):
            data[key] = ", ".join(value)
    return data
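# Example result shape (illustrative): summarize_book returns a flat dict with
# keys such as "Title", "Publish date", and "Authors" (list values joined into
# a single comma-separated string).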
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
        isbn = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
            book_summary = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import BatchEncoding
from transformers.models.bert.tokenization_bert import (
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.models.prophetnet.tokenization_prophetnet import VOCAB_FILES_NAMES, ProphetNetTokenizer
from transformers.testing_utils import require_torch, slow
from ...test_tokenization_common import TokenizerTesterMixin
class ProphetNetTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ProphetNetTokenizer
    test_rust_tokenizer = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
super().setUp()
A = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
        self.vocab_file = os.path.join(self.tmpdirname, VOCAB_FILES_NAMES["vocab_file"])
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
    def get_input_output_texts(self, tokenizer):
        input_text = "UNwant\u00E9d,running"
        output_text = "unwanted, running"
        return input_text, output_text
    def test_full_tokenizer(self):
        tokenizer = self.tokenizer_class(self.vocab_file)
        tokens = tokenizer.tokenize("UNwant\u00E9d,running")
        self.assertListEqual(tokens, ["un", "##want", "##ed", ",", "runn", "##ing"])
        self.assertListEqual(tokenizer.convert_tokens_to_ids(tokens), [9, 6, 7, 12, 10, 11])
    def test_chinese(self):
        tokenizer = BasicTokenizer()
        self.assertListEqual(tokenizer.tokenize("ah\u535A\u63A8zz"), ["ah", "\u535A", "\u63A8", "zz"])

    def test_basic_tokenizer_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["hello", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hällo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["h\u00E9llo"])

    def test_basic_tokenizer_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=True, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_lower_strip_accents_default(self):
        tokenizer = BasicTokenizer(do_lower_case=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["hallo", "!", "how", "are", "you", "?"])
        self.assertListEqual(tokenizer.tokenize("H\u00E9llo"), ["hello"])

    def test_basic_tokenizer_no_lower(self):
        tokenizer = BasicTokenizer(do_lower_case=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? "), ["HeLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_false(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=False)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HäLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_no_lower_strip_accents_true(self):
        tokenizer = BasicTokenizer(do_lower_case=False, strip_accents=True)
        self.assertListEqual(
            tokenizer.tokenize(" \tHäLLo!how \n Are yoU? "), ["HaLLo", "!", "how", "Are", "yoU", "?"])

    def test_basic_tokenizer_respects_never_split_tokens(self):
        tokenizer = BasicTokenizer(do_lower_case=False, never_split=["[UNK]"])
        self.assertListEqual(
            tokenizer.tokenize(" \tHeLLo!how \n Are yoU? [UNK]"), ["HeLLo", "!", "how", "Are", "yoU", "?", "[UNK]"])
    def test_wordpiece_tokenizer(self):
        vocab_tokens = ["[UNK]", "[CLS]", "[SEP]", "want", "##want", "##ed", "wa", "un", "runn", "##ing"]
        vocab = {}
        for i, token in enumerate(vocab_tokens):
            vocab[token] = i
        tokenizer = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
        self.assertListEqual(tokenizer.tokenize(""), [])
        self.assertListEqual(tokenizer.tokenize("unwanted running"), ["un", "##want", "##ed", "runn", "##ing"])
        self.assertListEqual(tokenizer.tokenize("unwantedX running"), ["[UNK]", "runn", "##ing"])
    @require_torch
    def test_prepare_batch(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        src_text = ["A long paragraph for summarization.", "Another paragraph for summarization."]
        expected_src_tokens = [1037, 2146, 20423, 2005, 7680, 7849, 3989, 1012, 102]
        batch = tokenizer(src_text, padding=True, return_tensors="pt")
        self.assertIsInstance(batch, BatchEncoding)
        result = list(batch.input_ids.numpy()[0])
        self.assertListEqual(expected_src_tokens, result)
        self.assertEqual((2, 9), batch.input_ids.shape)
        self.assertEqual((2, 9), batch.attention_mask.shape)
    def test_is_whitespace(self):
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
    def test_is_control(self):
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
    def test_is_punctuation(self):
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
    @slow
    def test_sequence_builders(self):
        tokenizer = self.tokenizer_class.from_pretrained("microsoft/prophetnet-large-uncased")
        text = tokenizer.encode("sequence builders", add_special_tokens=False)
        text_a = tokenizer.encode("multi-sequence build", add_special_tokens=False)
        encoded_sentence = tokenizer.build_inputs_with_special_tokens(text)
        encoded_pair = tokenizer.build_inputs_with_special_tokens(text, text_a)
        assert encoded_sentence == text + [102]
        assert encoded_pair == text + [102] + text_a + [102]
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["feature_extraction_perceiver"] = ["PerceiverFeatureExtractor"]
    _import_structure["image_processing_perceiver"] = ["PerceiverImageProcessor"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_perceiver"] = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 0 |
from __future__ import annotations
import matplotlib.pyplot as plt # type: ignore
import numpy
# initial triangle of Koch snowflake
_lowercase = numpy.array([0, 0])
_lowercase = numpy.array([0.5, 0.8_660_254])
_lowercase = numpy.array([1, 0])
_lowercase = [VECTOR_1, VECTOR_2, VECTOR_3, VECTOR_1]
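# repeatedly apply the iteration step to refine the initial triangle into a snowflake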
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : int ):
A = initial_vectors
    for _ in range(steps ):
        A = iteration_step(vectors )
return vectors
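# replace every edge by four new edges: keep the first third, add the two sides of
# an outward equilateral bump, then keep the last third (the classic Koch step)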
def _snake_case ( snake_case__ : Union[str, Any] ):
A = []
for i, start_vector in enumerate(vectors[:-1] ):
A = vectors[i + 1]
        new_vectors.append(start_vector )
A = end_vector - start_vector
new_vectors.append(start_vector + difference_vector / 3 )
new_vectors.append(
start_vector + difference_vector / 3 + rotate(difference_vector / 3 , 60 ) )
new_vectors.append(start_vector + difference_vector * 2 / 3 )
new_vectors.append(vectors[-1] )
return new_vectors
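# rotate a 2-D vector counterclockwise by the given angle (in degrees)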
def _snake_case ( snake_case__ : int , snake_case__ : Union[str, Any] ):
    A = numpy.radians(angle_in_degrees )
    A , A = numpy.cos(theta ), numpy.sin(theta )
    A = numpy.array(((c, -s), (s, c)) )
    return numpy.dot(rotation_matrix , vector )
def _snake_case ( snake_case__ : List[str] ):
A = plt.gca()
axes.set_aspect('equal' )
# matplotlib.pyplot.plot takes a list of all x-coordinates and a list of all
# y-coordinates as inputs, which are constructed from the vector-list using
# zip()
    A , A = zip(*vectors )
    plt.plot(x_coordinates , y_coordinates )
plt.show()
if __name__ == "__main__":
import doctest
doctest.testmod()
_lowercase = iterate(INITIAL_VECTORS, 5)
plot(processed_vectors) | 709 |
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
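# Build a SwinvaConfig whose size, depth and attention-head settings are parsed out
# of the timm checkpoint name (e.g. "swinv2_tiny_patch4_window8_256").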
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
A = swinva_name.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
        A = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        A = {int(k ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
        A = json.load(open(hf_hub_download(repo_id , filename , repo_type='dataset' ) , 'r' ) )
        A = {int(k ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
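# Translate a single timm parameter name into the corresponding HF Swinv2 name.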
def _snake_case ( snake_case__ : List[Any] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
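# Rename every key and split the fused timm qkv projection into the separate
# query/key/value tensors expected by the HF implementation.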
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
for key in orig_state_dict.copy().keys():
        A = orig_state_dict.pop(key )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[
dim : dim * 2
]
A = val[-dim:]
else:
A = val
return orig_state_dict
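# End-to-end conversion: load the timm model, port its weights into the HF model,
# then verify both produce (nearly) identical logits on a sample image before saving.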
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
A = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A = get_swinva_config(snake_case__ )
    A = SwinvaForImageClassification(config )
    model.eval()
    A = convert_state_dict(timm_model.state_dict() , model )
    model.load_state_dict(new_state_dict )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
    A = Image.open(requests.get(url , stream=True ).raw )
    A = image_processor(images=image , return_tensors='pt' )
    A = timm_model(inputs['pixel_values'] )
    A = model(**inputs ).logits
    assert torch.allclose(timm_outs , hf_outs , atol=1e-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path) | 22 | 0 |
"""simple docstring"""
import numpy as np
import torch
import torch.nn as nn
from transformers import CLIPConfig, CLIPVisionModelWithProjection, PreTrainedModel
from ...utils import logging
_lowercase = logging.get_logger(__name__)
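# Safety checker: a CLIP vision tower with two linear heads that score each image
# for NSFW and watermark content; any flagged image is replaced by a black image.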
class lowerCAmelCase_ ( SCREAMING_SNAKE_CASE_ ):
'''simple docstring'''
_lowerCamelCase: Tuple = CLIPConfig
_lowerCamelCase: Optional[int] = ['''CLIPEncoderLayer''']
def __init__( self : List[str] ,A_ : Any ) -> List[Any]:
super().__init__(UpperCamelCase__ )
A = CLIPVisionModelWithProjection(config.vision_config )
A = nn.Linear(config.vision_config.projection_dim ,1 )
A = nn.Linear(config.vision_config.projection_dim ,1 )
@torch.no_grad()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Optional[Any]=0.5 ,A_ : Optional[Any]=0.5 ) -> Tuple:
A = self.vision_model(UpperCamelCase__ )[0]
A = self.p_head(UpperCamelCase__ )
A = nsfw_detected.flatten()
A = nsfw_detected > p_threshold
A = nsfw_detected.tolist()
if any(UpperCamelCase__ ):
logger.warning(
'Potential NSFW content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, nsfw_detected_ in enumerate(UpperCamelCase__ ):
if nsfw_detected_:
A = np.zeros(images[idx].shape )
A = self.w_head(UpperCamelCase__ )
A = watermark_detected.flatten()
A = watermark_detected > w_threshold
A = watermark_detected.tolist()
if any(UpperCamelCase__ ):
logger.warning(
'Potential watermarked content was detected in one or more images. A black image will be returned instead.'
' Try again with a different prompt and/or seed.' )
for idx, watermark_detected_ in enumerate(UpperCamelCase__ ):
if watermark_detected_:
A = np.zeros(images[idx].shape )
return images, nsfw_detected, watermark_detected | 710 |
"""simple docstring"""
from math import pi, sqrt
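# Gamma function for positive integers and half-integers via the recurrence
# gamma(n) = (n - 1) * gamma(n - 1), anchored at gamma(1) = 1 and gamma(0.5) = sqrt(pi).
# For example, gamma(5) == 24.0 (i.e. 4!).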
def _snake_case ( snake_case__ : float ):
if num <= 0:
raise ValueError('math domain error' )
if num > 171.5:
raise OverflowError('math range error' )
elif num - int(snake_case__ ) not in (0, 0.5):
raise NotImplementedError('num must be an integer or a half-integer' )
elif num == 0.5:
        return sqrt(pi )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _snake_case ( ):
    assert gamma(0.5 ) == sqrt(pi )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
_lowercase = 1.0
while num:
_lowercase = float(input('''Gamma of: '''))
print(F"""gamma({num}) = {gamma(num)}""")
print('''\nEnter 0 to exit...''') | 22 | 0 |
"""simple docstring"""
from collections import deque
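# Tarjan's algorithm: one DFS assigns each vertex an index and a low-link value;
# a vertex whose low-link equals its own index roots a strongly connected
# component, which is then popped off the shared stack.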
def _snake_case ( snake_case__ : Tuple ):
    A = len(g )
A = deque()
    A = [False for _ in range(n )]
    A = [-1 for _ in range(n )]
A = index_of[:]
def strong_connect(snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
A = index # the number when this node is seen
A = index # lowest rank node reachable from here
index += 1
        stack.append(v )
A = True
for w in g[v]:
if index_of[w] == -1:
                A = strong_connect(w , index , components )
A = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
elif on_stack[w]:
A = (
lowlink_of[w] if lowlink_of[w] < lowlink_of[v] else lowlink_of[v]
)
if lowlink_of[v] == index_of[v]:
A = []
A = stack.pop()
A = False
            component.append(w )
while w != v:
A = stack.pop()
A = False
                component.append(w )
            components.append(component )
return index
A = []
    for v in range(n ):
        if index_of[v] == -1:
            strong_connect(v , 0 , components )
return components
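# Build an adjacency list for a directed graph from (source, target) edge pairs.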
def _snake_case ( snake_case__ : Any , snake_case__ : Tuple ):
    A = [[] for _ in range(n_vertices )]
for u, v in edges:
        g[u].append(v )
return g
if __name__ == "__main__":
# Test
_lowercase = 7
_lowercase = [0, 0, 1, 2, 3, 3, 4, 4, 6]
_lowercase = [1, 3, 2, 0, 1, 4, 5, 6, 5]
_lowercase = [(u, v) for u, v in zip(source, target)]
_lowercase = create_graph(n_vertices, edges)
assert [[5], [6], [4], [3, 2, 1, 0]] == tarjan(g) | 711 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
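# Convolutional encoder half of the VAE: a stack of down blocks followed by a mid
# block, producing (optionally doubled) latent channels for mean/logvar.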
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
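# Convolutional decoder half of the VAE: a mid block followed by up blocks that
# progressively upsample the latent back to image resolution.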
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
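# VQ-VAE quantization layer: snaps each latent vector to its nearest codebook entry,
# with a commitment loss and a straight-through estimator to keep gradients flowing.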
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
A = remap
if self.remap is not None:
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
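# Diagonal Gaussian over latents, parameterized by concatenated mean/logvar channels;
# supports reparameterized sampling, KL divergence and negative log-likelihood.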
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean | 22 | 0 |
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class lowerCAmelCase_ ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Any ,A_ : Any ) -> Dict:
super().__init__()
A = model
A = 2
A = nn.Linear(self.model.config.hidden_size ,self.num_labels )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
pass
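# Copy the Longformer backbone and QA head weights out of the Lightning checkpoint
# into a standalone LongformerForQuestionAnswering model and save it.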
def _snake_case ( snake_case__ : str , snake_case__ : str , snake_case__ : str ):
A = LongformerModel.from_pretrained(__snake_case )
A = LightningModel(__snake_case )
A = torch.load(__snake_case , map_location=torch.device('cpu' ) )
lightning_model.load_state_dict(ckpt['state_dict'] )
# init longformer question answering model
A = LongformerForQuestionAnswering.from_pretrained(__snake_case )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(__snake_case )
print(F'Conversion successful. Model saved under {pytorch_dump_folder_path}' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
) | 712 |
"""simple docstring"""
def _snake_case ( snake_case__ : list , snake_case__ : list , snake_case__ : int ):
A = len(snake_case__ )
A = [[0] * n for i in range(snake_case__ )]
    for i in range(n ):
A = y_points[i]
    for i in range(2 , n ):
        for j in range(i , n ):
A = (
(xa - x_points[j - i + 1]) * q[j][i - 1]
- (xa - x_points[j]) * q[j - 1][i - 1]
) / (x_points[j] - x_points[j - i + 1])
return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 0 |
import warnings
from functools import wraps
from typing import Callable
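# Decorator that flags a function as experimental: each call first emits a warning
# that the API may change, then forwards all arguments unchanged.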
def _snake_case ( snake_case__ : int ):
@wraps(a__ )
def _inner_fn(*snake_case__ : List[Any] , **snake_case__ : Optional[Any] ):
warnings.warn(
(F'\'{fn.__name__}\' is experimental and might be subject to breaking changes in the future.') , a__ , )
return fn(*a__ , **a__ )
return _inner_fn | 713 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
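# Shared fixture that builds a tiny Mask2Former config plus random pixel values,
# pixel masks and labels for the model tests below.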
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 0 |
import inspect
import unittest
from transformers import SegformerConfig, is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_MAPPING,
SegformerForImageClassification,
SegformerForSemanticSegmentation,
SegformerModel,
)
from transformers.models.segformer.modeling_segformer import SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import SegformerImageProcessor
class lowerCAmelCase_ ( UpperCamelCase__ ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
A = self.config_class(**self.inputs_dict )
self.parent.assertTrue(hasattr(__A ,'hidden_sizes' ) )
self.parent.assertTrue(hasattr(__A ,'num_attention_heads' ) )
self.parent.assertTrue(hasattr(__A ,'num_encoder_blocks' ) )
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[str] ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : List[Any]=64 ,A_ : int=3 ,A_ : Dict=4 ,A_ : str=[2, 2, 2, 2] ,A_ : Optional[Any]=[8, 4, 2, 1] ,A_ : str=[16, 32, 64, 128] ,A_ : Any=[1, 4, 8, 16] ,A_ : Tuple=[1, 2, 4, 8] ,A_ : List[Any]=True ,A_ : List[str]=True ,A_ : str="gelu" ,A_ : str=0.1 ,A_ : Dict=0.1 ,A_ : Optional[Any]=0.02 ,A_ : Union[str, Any]=3 ,A_ : Any=None ,) -> List[Any]:
A = parent
A = batch_size
A = image_size
A = num_channels
A = num_encoder_blocks
A = sr_ratios
A = depths
A = hidden_sizes
A = downsampling_rates
A = num_attention_heads
A = is_training
A = use_labels
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = num_labels
A = scope
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A = None
if self.use_labels:
A = ids_tensor([self.batch_size, self.image_size, self.image_size] ,self.num_labels )
A = self.get_config()
return config, pixel_values, labels
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
return SegformerConfig(
image_size=self.image_size ,num_channels=self.num_channels ,num_encoder_blocks=self.num_encoder_blocks ,depths=self.depths ,hidden_sizes=self.hidden_sizes ,num_attention_heads=self.num_attention_heads ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,initializer_range=self.initializer_range ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Tuple ,A_ : Optional[int] ,A_ : str ) -> Tuple:
A = SegformerModel(config=__A )
model.to(__A )
model.eval()
A = model(__A )
A = A = self.image_size // (self.downsampling_rates[-1] * 2)
self.parent.assertEqual(
result.last_hidden_state.shape ,(self.batch_size, self.hidden_sizes[-1], expected_height, expected_width) )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ,A_ : Any ,A_ : Union[str, Any] ) -> str:
A = self.num_labels
A = SegformerForSemanticSegmentation(__A )
model.to(__A )
model.eval()
A = model(__A )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
A = model(__A ,labels=__A )
self.parent.assertEqual(
result.logits.shape ,(self.batch_size, self.num_labels, self.image_size // 4, self.image_size // 4) )
self.parent.assertGreater(result.loss ,0.0 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ,A_ : int ,A_ : List[str] ) -> Dict:
A = 1
A = SegformerForSemanticSegmentation(config=__A )
model.to(__A )
model.eval()
A = torch.randint(0 ,1 ,(self.batch_size, self.image_size, self.image_size) ).to(__A )
A = model(__A ,labels=__A )
self.parent.assertGreater(result.loss ,0.0 )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Optional[Any]:
A = self.prepare_config_and_inputs()
A , A , A = config_and_inputs
A = {'pixel_values': pixel_values}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( UpperCamelCase__ , UpperCamelCase__ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = (
(
SegformerModel,
SegformerForSemanticSegmentation,
SegformerForImageClassification,
)
if is_torch_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': SegformerModel,
'''image-classification''': SegformerForImageClassification,
'''image-segmentation''': SegformerForSemanticSegmentation,
}
if is_torch_available()
else {}
)
_lowerCamelCase: int = True
_lowerCamelCase: int = False
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: Union[str, Any] = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
A = SegformerModelTester(self )
A = SegformerConfigTester(self ,config_class=__A )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> int:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__A )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_binary_image_segmentation(*__A )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_segmentation(*__A )
@unittest.skip('SegFormer does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
pass
@unittest.skip('SegFormer does not have get_input_embeddings method and get_output_embeddings methods' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(__A )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,__A )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
A = True
A = False
A = True
A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(__A ,__A ) )
A = outputs.attentions
A = sum(self.model_tester.depths )
self.assertEqual(len(__A ) ,__A )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
A = True
A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(__A ,__A ) )
A = outputs.attentions
self.assertEqual(len(__A ) ,__A )
# verify the first attentions (first block, first layer)
A = (self.model_tester.image_size // 4) ** 2
A = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
# verify the last attentions (last block, last layer)
A = (self.model_tester.image_size // 32) ** 2
A = (self.model_tester.image_size // (32 * self.model_tester.sr_ratios[-1])) ** 2
self.assertListEqual(
list(attentions[-1].shape[-3:] ) ,[self.model_tester.num_attention_heads[-1], expected_seq_len, expected_reduced_seq_len] ,)
A = len(__A )
# Check attention is always last and order is fine
A = True
A = True
A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(__A ,__A ) )
self.assertEqual(out_len + 1 ,len(__A ) )
A = outputs.attentions
self.assertEqual(len(__A ) ,__A )
# verify the first attentions (first block, first layer)
A = (self.model_tester.image_size // 4) ** 2
A = (self.model_tester.image_size // (4 * self.model_tester.sr_ratios[0])) ** 2
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) ,[self.model_tester.num_attention_heads[0], expected_seq_len, expected_reduced_seq_len] ,)
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
def check_hidden_states_output(A_ : Optional[Any] ,A_ : str ,A_ : List[str] ):
A = model_class(__A )
model.to(__A )
model.eval()
with torch.no_grad():
A = model(**self._prepare_for_class(__A ,__A ) )
A = outputs.hidden_states
A = self.model_tester.num_encoder_blocks
self.assertEqual(len(__A ) ,__A )
# verify the first hidden states (first block)
self.assertListEqual(
list(hidden_states[0].shape[-3:] ) ,[
self.model_tester.hidden_sizes[0],
self.model_tester.image_size // 4,
self.model_tester.image_size // 4,
] ,)
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = True
check_hidden_states_output(__A ,__A ,__A )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
A = True
check_hidden_states_output(__A ,__A ,__A )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
if not self.model_tester.is_training:
return
A , A = self.model_tester.prepare_config_and_inputs_for_common()
A = True
for model_class in self.all_model_classes:
if model_class in get_values(__A ):
continue
A = model_class(__A )
model.to(__A )
model.train()
A = self._prepare_for_class(__A ,__A ,return_labels=__A )
A = model(**__A ).loss
loss.backward()
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
for model_name in SEGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = SegformerModel.from_pretrained(__A )
self.assertIsNotNone(__A )
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
A = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=__A ,align=__A ,do_random_crop=__A )
A = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
__A )
A = prepare_img()
A = image_processor(images=__A ,return_tensors='pt' )
A = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
A = model(__A )
A = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,__A )
A = torch.tensor(
[
[[-4.63_10, -5.52_32, -6.23_56], [-5.19_21, -6.14_44, -6.59_96], [-5.44_24, -6.27_90, -6.75_74]],
[[-12.1391, -13.3122, -13.9554], [-12.8732, -13.9352, -14.3563], [-12.9438, -13.8226, -14.2513]],
[[-12.5134, -13.4686, -14.4915], [-12.8669, -14.4343, -14.7758], [-13.2523, -14.5819, -15.0694]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,__A ,atol=1e-4 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
A = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=__A ,align=__A ,do_random_crop=__A )
A = SegformerForSemanticSegmentation.from_pretrained(
'nvidia/segformer-b1-finetuned-cityscapes-1024-1024' ).to(__A )
A = prepare_img()
A = image_processor(images=__A ,return_tensors='pt' )
A = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
A = model(__A )
A = torch.Size((1, model.config.num_labels, 128, 128) )
self.assertEqual(outputs.logits.shape ,__A )
A = torch.tensor(
[
[[-13.5748, -13.9111, -12.6500], [-14.3500, -15.3683, -14.2328], [-14.7532, -16.0424, -15.6087]],
[[-17.1651, -15.8725, -12.9653], [-17.2580, -17.3718, -14.8223], [-16.6058, -16.8783, -16.7452]],
[[-3.64_56, -3.02_09, -1.42_03], [-3.07_97, -3.19_59, -2.00_00], [-1.87_57, -1.92_17, -1.69_97]],
] ).to(__A )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3, :3] ,__A ,atol=1e-1 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
A = SegformerImageProcessor(
image_scale=(512, 512) ,keep_ratio=__A ,align=__A ,do_random_crop=__A )
A = SegformerForSemanticSegmentation.from_pretrained('nvidia/segformer-b0-finetuned-ade-512-512' ).to(
__A )
A = prepare_img()
A = image_processor(images=__A ,return_tensors='pt' )
A = encoded_inputs.pixel_values.to(__A )
with torch.no_grad():
A = model(__A )
A = outputs.logits.detach().cpu()
A = image_processor.post_process_semantic_segmentation(outputs=__A ,target_sizes=[(500, 300)] )
A = torch.Size((500, 300) )
self.assertEqual(segmentation[0].shape ,__A )
A = image_processor.post_process_semantic_segmentation(outputs=__A )
A = torch.Size((128, 128) )
self.assertEqual(segmentation[0].shape ,__A ) | 714 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
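# Fixture producing a small EsmConfig plus random input ids and masks for the TF ESM tests.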
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
        A , A , A , A , A , A = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
        A , A , A , A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
A = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]] )
A = model(A_ )[0]
# compare the actual values for a slice.
A = tf.constant(
[
[
[0.14_44_30_92, 0.54_12_53_27, 0.3_24_77_39],
[0.30_34_04_84, 0.00_52_66_76, 0.31_07_77_22],
[0.32_27_80_43, -0.24_98_70_96, 0.3_41_46_28],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-4 ) )
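# --- Hedged usage sketch (added): mirrors the integration checks above; the
# checkpoint id is the one the tests use, everything else is illustrative.
# import tensorflow as tf
# from transformers import TFEsmModel
# model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
# hidden = model(tf.constant([[0, 6, 4, 13, 2]]))[0]  # (batch, seq_len, hidden_size)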
"""simple docstring"""
_lowercase = '''
# Transformers installation
! pip install transformers datasets
# To install from source instead of the last release, comment the command above and uncomment the following one.
# ! pip install git+https://github.com/huggingface/transformers.git
'''
_lowercase = [{'''type''': '''code''', '''content''': INSTALL_CONTENT}]
_lowercase = {
'''{processor_class}''': '''FakeProcessorClass''',
'''{model_class}''': '''FakeModelClass''',
'''{object_class}''': '''FakeObjectClass''',
}
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
_lowercase = subprocess.check_output('''git merge-base main HEAD'''.split()).decode('''utf-8''')
_lowercase = subprocess.check_output(F"""git diff --name-only {fork_point_sha}""".split()).decode('''utf-8''').split()
_lowercase = '''|'''.join(sys.argv[1:])
_lowercase = re.compile(rF"""^({joined_dirs}).*?\.py$""")
_lowercase = [x for x in modified_files if regex.match(x)]
print(''' '''.join(relevant_modified_files), end='''''')
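# --- Illustrative sketch (added): the same regex filter applied to a made-up
# file list, without shelling out to git; the names below are mine.
_demo_regex = re.compile(r'^(utils|src|tests).*?\.py$')
_demo_files = ['src/a.py', 'docs/b.md', 'tests/test_a.py', 'src/data.txt']
print(' '.join(f for f in _demo_files if _demo_regex.match(f)))  # src/a.py tests/test_a.py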
"""simple docstring"""
def _snake_case ( snake_case__ : int = 1000 ):
return sum(2 * a * ((a - 1) // 2) for a in range(3 , snake_case__ + 1 ) )
if __name__ == "__main__":
print(solution())
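# --- Added cross-check (illustrative): the closed form 2 * a * ((a - 1) // 2)
# in the generator above equals summing 2 * a once per b in 1..(a - 1) // 2.
_n_demo = 50  # small hypothetical bound
assert sum(2 * a * ((a - 1) // 2) for a in range(3, _n_demo + 1)) == sum(
    2 * a for a in range(3, _n_demo + 1) for _b in range((a - 1) // 2)
)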
"""simple docstring"""
import sys
from collections import defaultdict
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ) -> int:
A = []
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Optional[int]:
return self.node_position[vertex]
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : List[Any] ,A_ : Any ) -> List[Any]:
A = pos
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : str ,A_ : Dict ,A_ : List[str] ) -> str:
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
A = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
A = 2 * start + 1
else:
A = 2 * start + 2
if heap[smallest_child] < heap[start]:
A , A = heap[smallest_child], positions[smallest_child]
A , A = (
heap[start],
positions[start],
)
A , A = temp, tempa
A = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] ,self.get_position(positions[start] ) )
self.set_position(positions[start] ,A_ )
self.top_to_bottom(A_ ,A_ ,A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Optional[int] ,A_ : Dict ,A_ : str ,A_ : Union[str, Any] ) -> Dict:
A = position[index]
while index != 0:
A = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
A = heap[parent]
A = position[parent]
self.set_position(position[parent] ,A_ )
else:
A = val
A = temp
self.set_position(A_ ,A_ )
break
A = parent
else:
A = val
A = temp
self.set_position(A_ ,0 )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple ,A_ : Dict ) -> Union[str, Any]:
A = len(A_ ) // 2 - 1
for i in range(A_ ,-1 ,-1 ):
self.top_to_bottom(A_ ,A_ ,len(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ,A_ : Dict ) -> Union[str, Any]:
A = positions[0]
A = sys.maxsize
self.top_to_bottom(A_ ,0 ,len(A_ ) ,A_ )
return temp
def _snake_case ( snake_case__ : Dict ):
A = Heap()
A = [0] * len(snake_case__ )
A = [-1] * len(snake_case__ ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
A = [] # Heap of Distance of vertices from their neighboring vertex
A = []
for vertex in range(len(snake_case__ ) ):
distance_tv.append(sys.maxsize )
positions.append(snake_case__ )
heap.node_position.append(snake_case__ )
A = []
A = 1
A = sys.maxsize
for neighbor, distance in adjacency_list[0]:
A = 0
A = distance
heap.heapify(snake_case__ , snake_case__ )
for _ in range(1 , len(snake_case__ ) ):
A = heap.delete_minimum(snake_case__ , snake_case__ )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
A = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(snake_case__ )]
):
A = distance
heap.bottom_to_top(
snake_case__ , heap.get_position(snake_case__ ) , snake_case__ , snake_case__ )
A = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
_lowercase = int(input('''Enter number of edges: ''').strip())
_lowercase = defaultdict(list)
for _ in range(edges_number):
_lowercase = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list))
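# --- Non-interactive example (added): a tiny hand-written undirected graph.
# The call stays commented out because the helper is defined under an
# obfuscated name above; for this graph the expected result is [(0, 1), (1, 2)].
# _demo_graph = defaultdict(list)
# for u, v, w in [(0, 1, 1), (1, 2, 2), (0, 2, 3)]:
#     _demo_graph[u].append([v, w])
#     _demo_graph[v].append([u, w])
# print(prisms_algorithm(_demo_graph))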
"""simple docstring"""
import importlib
import inspect
import json
import os
import re
import shutil
import sys
from pathlib import Path
from typing import Dict, Optional, Union
from urllib import request
from huggingface_hub import HfFolder, cached_download, hf_hub_download, model_info
from packaging import version
from .. import __version__
from . import DIFFUSERS_DYNAMIC_MODULE_NAME, HF_MODULES_CACHE, logging
_lowercase = (
'''https://raw.githubusercontent.com/huggingface/diffusers/{revision}/examples/community/{pipeline}.py'''
)
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( ):
A = 'https://pypi.org/pypi/diffusers/json'
A = json.loads(request.urlopen(_lowercase ).read() )['releases'].keys()
return sorted(_lowercase , key=lambda snake_case__ : version.Version(_lowercase ) )
def _snake_case ( ):
if HF_MODULES_CACHE in sys.path:
return
sys.path.append(_lowercase )
os.makedirs(_lowercase , exist_ok=_lowercase )
A = Path(_lowercase ) / '__init__.py'
if not init_path.exists():
init_path.touch()
def _snake_case ( snake_case__ : Any ):
init_hf_modules()
A = Path(_lowercase ) / name
# If the parent module does not exist yet, recursively create it.
if not dynamic_module_path.parent.exists():
create_dynamic_module(dynamic_module_path.parent )
os.makedirs(_lowercase , exist_ok=_lowercase )
A = dynamic_module_path / '__init__.py'
if not init_path.exists():
init_path.touch()
def _snake_case ( snake_case__ : Union[str, Any] ):
with open(_lowercase , 'r' , encoding='utf-8' ) as f:
A = f.read()
# Imports of the form `import .xxx`
A = re.findall('^\s*import\s+\.(\S+)\s*$' , _lowercase , flags=re.MULTILINE )
# Imports of the form `from .xxx import yyy`
relative_imports += re.findall('^\s*from\s+\.(\S+)\s+import' , _lowercase , flags=re.MULTILINE )
# Unique-ify
return list(set(_lowercase ) )
def _snake_case ( snake_case__ : Any ):
A = False
A = [module_file]
A = []
# Let's recurse through all relative imports
while not no_change:
A = []
for f in files_to_check:
new_imports.extend(get_relative_imports(_lowercase ) )
A = Path(_lowercase ).parent
A = [str(module_path / m ) for m in new_imports]
A = [f for f in new_import_files if f not in all_relative_imports]
A = [F'{f}.py' for f in new_import_files]
A = len(_lowercase ) == 0
all_relative_imports.extend(_lowercase )
return all_relative_imports
def _snake_case ( snake_case__ : List[str] ):
with open(_lowercase , 'r' , encoding='utf-8' ) as f:
A = f.read()
# Imports of the form `import xxx`
A = re.findall('^\s*import\s+(\S+)\s*$' , _lowercase , flags=re.MULTILINE )
# Imports of the form `from xxx import yyy`
imports += re.findall('^\s*from\s+(\S+)\s+import' , _lowercase , flags=re.MULTILINE )
# Only keep the top-level module
A = [imp.split('.' )[0] for imp in imports if not imp.startswith('.' )]
# Unique-ify and test we got them all
A = list(set(_lowercase ) )
A = []
for imp in imports:
try:
importlib.import_module(_lowercase )
except ImportError:
missing_packages.append(_lowercase )
if len(_lowercase ) > 0:
raise ImportError(
'This modeling file requires the following packages that were not found in your environment: '
F'{", ".join(_lowercase )}. Run `pip install {" ".join(_lowercase )}`' )
return get_relative_imports(_lowercase )
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Dict ):
A = module_path.replace(os.path.sep , '.' )
A = importlib.import_module(_lowercase )
if class_name is None:
return find_pipeline_class(_lowercase )
return getattr(_lowercase , _lowercase )
def _snake_case ( snake_case__ : Optional[Any] ):
from ..pipelines import DiffusionPipeline
A = dict(inspect.getmembers(_lowercase , inspect.isclass ) )
A = None
for cls_name, cls in cls_members.items():
if (
cls_name != DiffusionPipeline.__name__
and issubclass(cls , _lowercase )
and cls.__module__.split('.' )[0] != "diffusers"
):
if pipeline_class is not None:
raise ValueError(
F'Multiple classes that inherit from {DiffusionPipeline.__name__} have been found:'
F' {pipeline_class.__name__}, and {cls_name}. Please make sure to define only one in'
F' {loaded_module}.' )
A = cls
return pipeline_class
def _snake_case ( snake_case__ : Tuple , snake_case__ : Union[str, Any] , snake_case__ : Any = None , snake_case__ : Union[str, Any] = False , snake_case__ : Tuple = False , snake_case__ : int = None , snake_case__ : Any = None , snake_case__ : Tuple = None , snake_case__ : Any = False , ):
A = str(_lowercase )
A = os.path.join(_lowercase , _lowercase )
if os.path.isfile(_lowercase ):
A = module_file_or_url
A = 'local'
elif pretrained_model_name_or_path.count('/' ) == 0:
A = get_diffusers_versions()
# cut ".dev0"
A = 'v' + '.'.join(__version__.split('.' )[:3] )
# retrieve github version that matches
if revision is None:
A = latest_version if latest_version[1:] in available_versions else 'main'
logger.info(F'Defaulting to latest_version: {revision}.' )
elif revision in available_versions:
A = F'v{revision}'
elif revision == "main":
A = revision
else:
raise ValueError(
F'`custom_revision`: {revision} does not exist. Please make sure to choose one of'
F' {", ".join(available_versions + ["main"] )}.' )
# community pipeline on GitHub
A = COMMUNITY_PIPELINES_URL.format(revision=_lowercase , pipeline=_lowercase )
try:
A = cached_download(
_lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , use_auth_token=_lowercase , )
A = 'git'
A = pretrained_model_name_or_path + '.py'
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
else:
try:
# Load from URL or cache if already cached
A = hf_hub_download(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , proxies=_lowercase , resume_download=_lowercase , local_files_only=_lowercase , use_auth_token=_lowercase , )
A = os.path.join('local' , '--'.join(pretrained_model_name_or_path.split('/' ) ) )
except EnvironmentError:
logger.error(F'Could not locate the {module_file} inside {pretrained_model_name_or_path}.' )
raise
# Check we have all the requirements in our environment
A = check_imports(_lowercase )
# Now we move the module inside our cached dynamic modules.
A = DIFFUSERS_DYNAMIC_MODULE_NAME + os.path.sep + submodule
create_dynamic_module(_lowercase )
A = Path(_lowercase ) / full_submodule
if submodule == "local" or submodule == "git":
# We always copy local files (we could hash the file to see if there was a change, and give them the name of
# that hash, to only copy when there is a modification but it seems overkill for now).
# The only reason we do the copy is to avoid putting too many folders in sys.path.
shutil.copy(_lowercase , submodule_path / module_file )
for module_needed in modules_needed:
A = F'{module_needed}.py'
shutil.copy(os.path.join(_lowercase , _lowercase ) , submodule_path / module_needed )
else:
# Get the commit hash
# TODO: we will get this info in the etag soon, so retrieve it from there and not here.
if isinstance(_lowercase , _lowercase ):
A = use_auth_token
elif use_auth_token is True:
A = HfFolder.get_token()
else:
A = None
A = model_info(_lowercase , revision=_lowercase , token=_lowercase ).sha
# The module file will end up being placed in a subfolder with the git hash of the repo. This way we get the
# benefit of versioning.
A = submodule_path / commit_hash
A = full_submodule + os.path.sep + commit_hash
create_dynamic_module(_lowercase )
if not (submodule_path / module_file).exists():
shutil.copy(_lowercase , submodule_path / module_file )
# Make sure we also have every file with relative
for module_needed in modules_needed:
if not (submodule_path / module_needed).exists():
get_cached_module_file(
_lowercase , F'{module_needed}.py' , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
return os.path.join(_lowercase , _lowercase )
def _snake_case ( snake_case__ : List[str] , snake_case__ : Tuple , snake_case__ : Tuple = None , snake_case__ : Any = None , snake_case__ : Any = False , snake_case__ : Tuple = False , snake_case__ : Any = None , snake_case__ : str = None , snake_case__ : List[str] = None , snake_case__ : Tuple = False , **snake_case__ : int , ):
A = get_cached_module_file(
_lowercase , _lowercase , cache_dir=_lowercase , force_download=_lowercase , resume_download=_lowercase , proxies=_lowercase , use_auth_token=_lowercase , revision=_lowercase , local_files_only=_lowercase , )
return get_class_in_module(_lowercase , final_module.replace('.py' , '' ) )
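# --- Illustrative check (added) of the relative-import regexes above, run on a
# tiny in-memory module body; the source string is invented for the demo.
_demo_src = 'import .unet_blocks\nfrom .pipeline_utils import Foo\nimport torch\n'
print(re.findall(r'^\s*import\s+\.(\S+)\s*$', _demo_src, flags=re.MULTILINE))  # ['unet_blocks']
print(re.findall(r'^\s*from\s+\.(\S+)\s+import', _demo_src, flags=re.MULTILINE))  # ['pipeline_utils']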
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
from cookiecutter.main import cookiecutter
_lowercase = True
except ImportError:
_lowercase = False
_lowercase = logging.get_logger(__name__) # pylint: disable=invalid-name
def _snake_case ( snake_case__ : Namespace ):
return AddNewModelCommand(args.testing , args.testing_file , path=args.path )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('add-new-model' )
add_new_model_parser.add_argument('--testing' ,action='store_true' ,help='If in testing mode.' )
add_new_model_parser.add_argument('--testing_file' ,type=A_ ,help='Configuration file on which to run.' )
add_new_model_parser.add_argument(
'--path' ,type=A_ ,help='Path to cookiecutter. Should only be used for testing purposes.' )
add_new_model_parser.set_defaults(func=A_ )
def __init__( self : Tuple ,A_ : bool ,A_ : str ,A_ : Tuple=None ,*A_ : List[str] ) -> Union[str, Any]:
A = testing
A = testing_file
A = path
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
A = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
if len(A_ ) > 0:
raise ValueError(
'Several directories starting with `cookiecutter-template-` in current working directory. '
'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
'change your working directory.' )
A = (
Path(A_ ).parent.parent.parent.parent if self._path is None else Path(self._path ).parent.parent
)
A = path_to_transformer_root / 'templates' / 'adding_a_new_model'
# Execute cookiecutter
if not self._testing:
cookiecutter(str(A_ ) )
else:
with open(self._testing_file ,'r' ) as configuration_file:
A = json.load(A_ )
cookiecutter(
str(path_to_cookiecutter if self._path is None else self._path ) ,no_input=A_ ,extra_context=A_ ,)
A = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]
# Retrieve configuration
with open(directory + '/configuration.json' ,'r' ) as configuration_file:
A = json.load(A_ )
A = configuration['lowercase_modelname']
A = configuration['generate_tensorflow_pytorch_and_flax']
os.remove(F'{directory}/configuration.json' )
A = 'PyTorch' in generate_tensorflow_pytorch_and_flax
A = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
A = 'Flax' in generate_tensorflow_pytorch_and_flax
A = F'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
os.makedirs(A_ ,exist_ok=A_ )
os.makedirs(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}' ,exist_ok=A_ )
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
def remove_copy_lines(A_ : int ):
with open(A_ ,'r' ) as f:
A = f.readlines()
with open(A_ ,'w' ) as f:
for line in lines:
if "# Copied from transformers." not in line:
f.write(A_ )
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
def replace(A_ : str ,A_ : str ,A_ : List[str] ):
# Create temp file
A , A = mkstemp()
A = False
with fdopen(A_ ,'w' ) as new_file:
with open(A_ ) as old_file:
for line in old_file:
new_file.write(A_ )
if line_to_copy_below in line:
A = True
for line_to_copy in lines_to_copy:
new_file.write(A_ )
if not line_found:
raise ValueError(F'Line {line_to_copy_below} was not found in file.' )
# Copy the file permissions from the old file to the new file
copymode(A_ ,A_ )
# Remove original file
remove(A_ )
# Move new file
move(A_ ,A_ )
def skip_units(A_ : Dict ):
return (
("generating PyTorch" in line and not output_pytorch)
or ("generating TensorFlow" in line and not output_tensorflow)
or ("generating Flax" in line and not output_flax)
)
def replace_in_files(A_ : Tuple ):
with open(A_ ) as datafile:
A = []
A = False
A = False
for line in datafile:
if "# To replace in: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# Below: " in line and "##" not in line:
A = line.split('"' )[1]
A = skip_units(A_ )
elif "# End." in line and "##" not in line:
if not skip_file and not skip_snippet:
replace(A_ ,A_ ,A_ )
A = []
elif "# Replace with" in line and "##" not in line:
A = []
elif "##" not in line:
lines_to_copy.append(A_ )
remove(A_ )
replace_in_files(F'{directory}/to_replace_{lowercase_model_name}.py' )
os.rmdir(A_ )
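# --- Added note (illustrative): the marker protocol consumed by replace_in_files
# above, as it might look in a hypothetical to_replace_<model>.py; lines between
# "Below:" and "End." are copied beneath the anchor line in the target file.
#
# # To replace in: "src/transformers/models/auto/configuration_auto.py"
# # Below: "CONFIG_MAPPING_NAMES = OrderedDict("
#         ("mymodel", "MyModelConfig"),
# # End.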
"""simple docstring"""
from math import cos, sin, sqrt, tau
from audio_filters.iir_filter import IIRFilter
def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : float = 1 / sqrt(2 ) ):
A = tau * frequency / samplerate
A = sin(_lowerCamelCase )
A = cos(_lowerCamelCase )
A = _sin / (2 * q_factor)
A = (1 - _cos) / 2
A = 1 - _cos
A = 1 + alpha
A = -2 * _cos
A = 1 - alpha
A = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : float = 1 / sqrt(2 ) ):
A = tau * frequency / samplerate
A = sin(_lowerCamelCase )
A = cos(_lowerCamelCase )
A = _sin / (2 * q_factor)
A = (1 + _cos) / 2
A = -1 - _cos
A = 1 + alpha
A = -2 * _cos
A = 1 - alpha
A = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : float = 1 / sqrt(2 ) ):
A = tau * frequency / samplerate
A = sin(_lowerCamelCase )
A = cos(_lowerCamelCase )
A = _sin / (2 * q_factor)
A = _sin / 2
A = 0
A = -ba
A = 1 + alpha
A = -2 * _cos
A = 1 - alpha
A = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : float = 1 / sqrt(2 ) ):
A = tau * frequency / samplerate
A = sin(_lowerCamelCase )
A = cos(_lowerCamelCase )
A = _sin / (2 * q_factor)
A = 1 - alpha
A = -2 * _cos
A = 1 + alpha
A = IIRFilter(2 )
filt.set_coefficients([ba, ba, ba] , [ba, ba, ba] )
return filt
def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : float , snake_case__ : float = 1 / sqrt(2 ) , ):
A = tau * frequency / samplerate
A = sin(_lowerCamelCase )
A = cos(_lowerCamelCase )
A = _sin / (2 * q_factor)
A = 10 ** (gain_db / 40)
A = 1 + alpha * big_a
A = -2 * _cos
A = 1 - alpha * big_a
A = 1 + alpha / big_a
A = -2 * _cos
A = 1 - alpha / big_a
A = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : float , snake_case__ : float = 1 / sqrt(2 ) , ):
A = tau * frequency / samplerate
A = sin(_lowerCamelCase )
A = cos(_lowerCamelCase )
A = _sin / (2 * q_factor)
A = 10 ** (gain_db / 40)
A = (big_a + 1) - (big_a - 1) * _cos
A = (big_a + 1) + (big_a - 1) * _cos
A = (big_a - 1) - (big_a + 1) * _cos
A = (big_a - 1) + (big_a + 1) * _cos
A = 2 * sqrt(_lowerCamelCase ) * alpha
A = big_a * (pmc + aaa)
A = 2 * big_a * mpc
A = big_a * (pmc - aaa)
A = ppmc + aaa
A = -2 * pmpc
A = ppmc - aaa
A = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
def _snake_case ( snake_case__ : int , snake_case__ : int , snake_case__ : float , snake_case__ : float = 1 / sqrt(2 ) , ):
A = tau * frequency / samplerate
A = sin(_lowerCamelCase )
A = cos(_lowerCamelCase )
A = _sin / (2 * q_factor)
A = 10 ** (gain_db / 40)
A = (big_a + 1) - (big_a - 1) * _cos
A = (big_a + 1) + (big_a - 1) * _cos
A = (big_a - 1) - (big_a + 1) * _cos
A = (big_a - 1) + (big_a + 1) * _cos
A = 2 * sqrt(_lowerCamelCase ) * alpha
A = big_a * (ppmc + aaa)
A = -2 * big_a * pmpc
A = big_a * (ppmc - aaa)
A = pmc + aaa
A = 2 * mpc
A = pmc - aaa
A = IIRFilter(2 )
filt.set_coefficients([aa, aa, aa] , [ba, ba, ba] )
return filt
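# --- Added worked example: the low-pass coefficient math above for a 1 kHz
# cutoff at a 48 kHz sample rate with Butterworth Q; the names are mine.
_w0 = tau * 1000 / 48000
_alpha = sin(_w0) / (2 * (1 / sqrt(2)))
_b0 = (1 - cos(_w0)) / 2  # b1 = 1 - cos(w0), b2 = b0
print(_w0, _alpha, _b0)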
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : int ,A_ : Tuple ,A_ : str=7 ,A_ : Tuple=3 ,A_ : List[Any]=18 ,A_ : List[str]=30 ,A_ : Optional[Any]=400 ,A_ : Any=True ,A_ : Optional[Any]=None ,A_ : List[str]=True ,) -> str:
A = size if size is not None else {'height': 18, 'width': 18}
A = parent
A = batch_size
A = num_channels
A = image_size
A = min_resolution
A = max_resolution
A = do_resize
A = size
A = do_normalize
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
@require_torch
@require_vision
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[Any] = ImageGPTImageProcessor if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A = ImageGPTImageProcessingTester(self )
@property
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
return self.image_processor_tester.prepare_image_processor_dict()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(A_ ,'clusters' ) )
self.assertTrue(hasattr(A_ ,'do_resize' ) )
self.assertTrue(hasattr(A_ ,'size' ) )
self.assertTrue(hasattr(A_ ,'do_normalize' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Tuple:
A = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size ,{'height': 18, 'width': 18} )
A = self.image_processing_class.from_dict(self.image_processor_dict ,size=42 )
self.assertEqual(image_processor.size ,{'height': 42, 'width': 42} )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
A = self.image_processing_class(**self.image_processor_dict )
A = json.loads(image_processor.to_json_string() )
for key, value in self.image_processor_dict.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,obj[key] ) )
else:
self.assertEqual(obj[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Union[str, Any]:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A = os.path.join(A_ ,'image_processor.json' )
image_processor_first.to_json_file(A_ )
A = self.image_processing_class.from_json_file(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = self.image_processing_class(**self.image_processor_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
image_processor_first.save_pretrained(A_ )
A = self.image_processing_class.from_pretrained(A_ ).to_dict()
A = image_processor_first.to_dict()
for key, value in image_processor_first.items():
if key == "clusters":
self.assertTrue(np.array_equal(A_ ,image_processor_second[key] ) )
else:
self.assertEqual(image_processor_first[key] ,A_ )
@unittest.skip('ImageGPT requires clusters at initialization' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
pass
def _snake_case ( ):
A = load_dataset('hf-internal-testing/fixtures_image_utils' , split='test' )
A = Image.open(dataset[4]['file'] )
A = Image.open(dataset[5]['file'] )
A = [imagea, imagea]
return images
@require_vision
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : str ) -> int:
A = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small' )
A = prepare_images()
# test non-batched
A = image_processing(images[0] ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(1, 1024) )
A = [306, 191, 191]
self.assertEqual(encoding.input_ids[0, :3].tolist() ,A_ )
# test batched
A = image_processing(A_ ,return_tensors='pt' )
self.assertIsInstance(encoding.input_ids ,torch.LongTensor )
self.assertEqual(encoding.input_ids.shape ,(2, 1024) )
A = [303, 13, 13]
self.assertEqual(encoding.input_ids[1, -3:].tolist() ,A_ )
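# --- Hedged usage sketch (added): what the slow integration test above exercises.
# from transformers import ImageGPTImageProcessor
# processor = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
# enc = processor(images, return_tensors='pt')  # pixels -> nearest-cluster ids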
"""simple docstring"""
import warnings
from contextlib import contextmanager
from ....processing_utils import ProcessorMixin
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: str = '''MCTCTFeatureExtractor'''
_lowerCamelCase: Any = '''AutoTokenizer'''
def __init__( self : Tuple ,A_ : List[str] ,A_ : List[str] ) -> Dict:
super().__init__(A__ ,A__ )
A = self.feature_extractor
A = False
def __call__( self : Union[str, Any] ,*A_ : List[Any] ,**A_ : str ) -> Dict:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor(*A__ ,**A__ )
if "raw_speech" in kwargs:
warnings.warn('Using `raw_speech` as a keyword argument is deprecated. Use `audio` instead.' )
A = kwargs.pop('raw_speech' )
else:
A = kwargs.pop('audio' ,A__ )
A = kwargs.pop('sampling_rate' ,A__ )
A = kwargs.pop('text' ,A__ )
if len(A__ ) > 0:
A = args[0]
A = args[1:]
if audio is None and text is None:
raise ValueError('You need to specify either an `audio` or `text` input to process.' )
if audio is not None:
A = self.feature_extractor(A__ ,*A__ ,sampling_rate=A__ ,**A__ )
if text is not None:
A = self.tokenizer(A__ ,**A__ )
if text is None:
return inputs
elif audio is None:
return encodings
else:
A = encodings["""input_ids"""]
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,*A_ : Dict ,**A_ : str ) -> List[str]:
return self.tokenizer.batch_decode(*A__ ,**A__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,*A_ : str ,**A_ : List[Any] ) -> Optional[Any]:
# For backward compatibility
if self._in_target_context_manager:
return self.current_processor.pad(*A__ ,**A__ )
A = kwargs.pop('input_features' ,A__ )
A = kwargs.pop('labels' ,A__ )
if len(A__ ) > 0:
A = args[0]
A = args[1:]
if input_features is not None:
A = self.feature_extractor.pad(A__ ,*A__ ,**A__ )
if labels is not None:
A = self.tokenizer.pad(A__ ,**A__ )
if labels is None:
return input_features
elif input_features is None:
return labels
else:
A = labels["""input_ids"""]
return input_features
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,*A_ : str ,**A_ : List[str] ) -> List[str]:
return self.tokenizer.decode(*A__ ,**A__ )
@contextmanager
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
warnings.warn(
'`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your '
'labels by using the argument `text` of the regular `__call__` method (either in the same call as '
'your audio inputs, or in a separate call.' )
A = True
A = self.tokenizer
yield
A = self.feature_extractor
A = False
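# --- Hedged usage sketch (added); the checkpoint id is the public M-CTC-T one,
# the waveform variable is illustrative.
# from transformers import MCTCTProcessor
# processor = MCTCTProcessor.from_pretrained('speechbrain/m-ctc-t-large')
# inputs = processor(audio=waveform, sampling_rate=16000, return_tensors='pt')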
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def _snake_case ( snake_case__ : Optional[int] ):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
@staticmethod
def _SCREAMING_SNAKE_CASE ( A_ : ArgumentParser ) -> Any:
A = parser.add_parser('download' )
download_parser.add_argument(
'--cache-dir' ,type=A_ ,default=A_ ,help='Path to location to store the models' )
download_parser.add_argument(
'--force' ,action='store_true' ,help='Force the model to be download even if already in cache-dir' )
download_parser.add_argument(
'--trust-remote-code' ,action='store_true' ,help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine' ,)
download_parser.add_argument('model' ,type=A_ ,help='Name of the model to download' )
download_parser.set_defaults(func=A_ )
def __init__( self : Dict ,A_ : str ,A_ : str ,A_ : bool ,A_ : bool ) -> Union[str, Any]:
A = model
A = cache
A = force
A = trust_remote_code
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
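# --- Illustrative CLI invocation (added) of the command defined above; the
# model name is only an example.
# transformers-cli download bert-base-uncased --cache-dir /tmp/hf-cache --force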
"""simple docstring"""
import argparse
from transformers import TaConfig, TaForConditionalGeneration, load_tf_weights_in_ta
from transformers.utils import logging
logging.set_verbosity_info()
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] , snake_case__ : str ):
# Initialise PyTorch model
A = TaConfig.from_json_file(lowerCAmelCase__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TaForConditionalGeneration(lowerCAmelCase__ )
# Load weights from tf checkpoint
load_tf_weights_in_ta(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
# Save pytorch-model
print(F'Save PyTorch model to {pytorch_dump_path}' )
model.save_pretrained(lowerCAmelCase__ )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained T5 model. \nThis specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
_lowercase = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
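# --- Hypothetical invocation (added); every path below is a placeholder.
# python convert_t5_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./t5/model.ckpt \
#     --config_file ./t5/config.json \
#     --pytorch_dump_path ./t5-pytorch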
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spm_char.model'''}
_lowercase = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
_lowercase = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Any:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]:
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
A = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
A = [1]
if token_ids_a is None:
return ([0] * len(A_ )) + suffix_ones
return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,)
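# --- Hedged usage sketch (added): the checkpoint id comes from the map above;
# the rest is illustrative. An eos id is appended by
# build_inputs_with_special_tokens.
# from transformers import SpeechT5Tokenizer
# tok = SpeechT5Tokenizer.from_pretrained('microsoft/speecht5_tts')
# ids = tok('Hello world').input_ids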
"""simple docstring"""
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
_lowercase = logging.getLogger(__name__)
_lowercase = 50 # max width of layer names
_lowercase = 70 # max width of quantizer names
def _snake_case ( snake_case__ : Optional[int] ):
A = parser.add_argument_group('quant_trainer arguments' )
group.add_argument('--wprec' , type=_lowerCamelCase , default=8 , help='weight precision' )
group.add_argument('--aprec' , type=_lowerCamelCase , default=8 , help='activation precision' )
group.add_argument('--quant-per-tensor' , action='store_true' , help='per tensor weight scaling' )
group.add_argument('--quant-disable' , action='store_true' , help='disable all quantizers' )
group.add_argument('--quant-disable-embeddings' , action='store_true' , help='disable all embeddings quantizers' )
group.add_argument('--quant-disable-keyword' , type=_lowerCamelCase , nargs='+' , help='disable quantizers by keyword' )
group.add_argument('--quant-disable-layer-module' , type=_lowerCamelCase , help='disable quantizers by keyword under layer.' )
group.add_argument('--quant-enable-layer-module' , type=_lowerCamelCase , help='enable quantizers by keyword under layer' )
group.add_argument('--calibrator' , default='max' , help='which quantization range calibrator to use' )
group.add_argument('--percentile' , default=_lowerCamelCase , type=_lowerCamelCase , help='percentile for PercentileCalibrator' )
group.add_argument('--fuse-qkv' , action='store_true' , help='use the same scale factor for qkv' )
group.add_argument('--clip-gelu' , metavar='N' , type=_lowerCamelCase , help='clip gelu output maximum value to N' )
group.add_argument(
'--recalibrate-weights' , action='store_true' , help=(
'recalibrate weight amaxes by taking the max of the weights.'
' amaxes will be computed with the current quantization granularity (axis).'
) , )
def _snake_case ( snake_case__ : str ):
if args.calibrator == "max":
A = 'max'
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError('Specify --percentile when using percentile calibrator' )
A = 'histogram'
elif args.calibrator == "mse":
A = 'histogram'
else:
raise ValueError(F'Invalid calibrator {args.calibrator}' )
A = QuantDescriptor(num_bits=args.aprec , calib_method=_lowerCamelCase )
A = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_lowerCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(_lowerCamelCase )
def _snake_case ( snake_case__ : List[str] , snake_case__ : Union[str, Any] , snake_case__ : Dict=False , snake_case__ : Union[str, Any]=False ):
logger.info('Configuring Model for Quantization' )
logger.info(F'using quantization package {pytorch_quantization.__file__}' )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_lowerCamelCase , ['embeddings'] , which='weight' , _disabled=_lowerCamelCase )
if args.quant_disable:
set_quantizer_by_name(_lowerCamelCase , [''] , _disabled=_lowerCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(_lowerCamelCase , args.quant_disable_keyword , _disabled=_lowerCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [r'layer.\d+.' + args.quant_disable_layer_module] , _disabled=_lowerCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [r'layer.\d+.' + args.quant_enable_layer_module] , _disabled=_lowerCamelCase )
if args.recalibrate_weights:
recalibrate_weights(_lowerCamelCase )
if args.fuse_qkv:
fuse_qkv(_lowerCamelCase , _lowerCamelCase )
if args.clip_gelu:
clip_gelu(_lowerCamelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_lowerCamelCase )
def _snake_case ( snake_case__ : str ):
logger.info('Enabling Calibration' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F'{name:80}: {module}' )
def _snake_case ( snake_case__ : Tuple , snake_case__ : List[Any] ):
logger.info('Loading calibrated amax' )
for name, module in model.named_modules():
if name.endswith('_quantizer' ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax('percentile' , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_lowerCamelCase )
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Dict ):
def fusea(snake_case__ : int , snake_case__ : str , snake_case__ : List[str] ):
for mod in [qq, qk, qv]:
if not hasattr(_lowerCamelCase , '_amax' ):
print(' WARNING: NO AMAX BUFFER' )
return
A = qq._amax.detach().item()
A = qk._amax.detach().item()
A = qv._amax.detach().item()
A = max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
qq._amax.fill_(_lowerCamelCase )
qk._amax.fill_(_lowerCamelCase )
qv._amax.fill_(_lowerCamelCase )
logger.info(F' q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}' )
for name, mod in model.named_modules():
if name.endswith('.attention.self' ):
logger.info(F'FUSE_QKV: {name:{name_width}}' )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _snake_case ( snake_case__ : Any , snake_case__ : Dict ):
for name, mod in model.named_modules():
if name.endswith('.output.dense' ) and not name.endswith('attention.output.dense' ):
A = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_lowerCamelCase )
A = mod._input_quantizer._amax.data.detach().item()
logger.info(F'CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}' )
def _snake_case ( snake_case__ : int ):
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , '_weight_quantizer' ) and mod._weight_quantizer.axis is not None:
A = mod.weight.shape[0]
A = mod._weight_quantizer._amax.detach()
A = torch.ones(_lowerCamelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F'expanding {name} {amax} -> {mod._weight_quantizer._amax}' )
def _snake_case ( snake_case__ : Dict ):
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , '_weight_quantizer' ):
if not hasattr(mod._weight_quantizer ,'_amax' ):
print(F'RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER' )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
A = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
A = set(range(len(mod.weight.size() ) ) ) - axis_set
A = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_lowerCamelCase , keepdims=_lowerCamelCase ).detach()
logger.info(F'RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}' )
A = amax
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Any=25 , snake_case__ : Optional[int]=180 , snake_case__ : Any=None ):
if ignore is None:
A = []
elif not isinstance(_lowerCamelCase , _lowerCamelCase ):
A = [ignore]
A = 0
for name, mod in model.named_modules():
if not hasattr(_lowerCamelCase , 'weight' ):
continue
A = max(_lowerCamelCase , len(_lowerCamelCase ) )
for name, mod in model.named_modules():
A = getattr(_lowerCamelCase , '_input_quantizer' , _lowerCamelCase )
A = getattr(_lowerCamelCase , '_weight_quantizer' , _lowerCamelCase )
if not hasattr(_lowerCamelCase , 'weight' ):
continue
if type(_lowerCamelCase ) in ignore:
continue
if [True for s in ignore if type(_lowerCamelCase ) is str and s in name]:
continue
A = F'Act:{input_q.extra_repr()}'
A = F'Wgt:{weight_q.extra_repr()}'
A = F'{name:{name_width}} {act_str} {wgt_str}'
if len(_lowerCamelCase ) <= line_width:
logger.info(_lowerCamelCase )
else:
logger.info(F'{name:{name_width}} {act_str}' )
logger.info(F'{" ":{name_width}} {wgt_str}' )
def _snake_case ( snake_case__ : Optional[Any] ):
A = 0
for name, mod in model.named_modules():
if isinstance(_lowerCamelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F'{name:80} {mod}' )
count += 1
print(F'{count} TensorQuantizers found in model' )
def _snake_case ( snake_case__ : Tuple , snake_case__ : List[str] , snake_case__ : str , snake_case__ : str , snake_case__ : str ):
A = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if quantizer_mod is not None:
assert hasattr(_lowerCamelCase , _lowerCamelCase )
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
logger.warning(F'{name} has no {quantizer}' )
def _snake_case ( snake_case__ : List[str] , snake_case__ : Any , snake_case__ : Union[str, Any]="both" , **snake_case__ : Any ):
A = F'Warning: changing {which} quantizers of {name:{qname_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
if which in ["input", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , '_input_quantizer' , _lowerCamelCase , _lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , '_weight_quantizer' , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase )
def _snake_case ( snake_case__ : Tuple , snake_case__ : Union[str, Any] , **snake_case__ : List[Any] ):
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , '_input_quantizer' ) or hasattr(_lowerCamelCase , '_weight_quantizer' ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
set_quantizers(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
elif name.endswith('_quantizer' ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
A = F'Warning: changing {name:{name_width}}'
for k, v in kwargs.items():
s += F' {k}={v}'
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase )
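# --- Typical calibration flow (added, illustrative). The helper names below are
# hypothetical readable stand-ins for the obfuscated defs above; this is a
# sketch, not runnable as-is.
# add_arguments(parser); args = parser.parse_args()
# set_default_quantizers(args)
# configure_model(model, args, calib=True); enable_calibration(model)
# ...run a few forward passes to collect activation statistics...
# finish_calibration(model, args); configure_model(model, args, calib=False)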
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPFeatureExtractor''']
_lowercase = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
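# --- Illustrative consumer (added) of this lazy module; the checkpoint id is a
# real public one, the usage itself is a sketch.
# from transformers import CLIPModel, CLIPProcessor
# model = CLIPModel.from_pretrained('openai/clip-vit-base-patch32')
# processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32')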
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
def _snake_case ( snake_case__ : List[str] ):
A = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
A = 1024
A = 4096
A = 24
A = 16
A = [5, 11, 17, 23]
A = [256, 512, 1024, 1024]
A = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
A = 768
A = [1, 1, 1, 0.5]
A = [256, 512, 768, 768]
A = 150
A = 16
A = (1, 384, 384)
A = False
A = 'project'
if "ade" in checkpoint_url:
A = True
A = 768
A = [1, 1, 1, 0.5]
A = 150
A = 16
A = 'huggingface/label-files'
A = 'ade20k-id2label.json'
A = json.load(open(cached_download(hf_hub_url(snake_case__ , snake_case__ , repo_type='dataset' ) ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = [1, 150, 480, 480]
return config, expected_shape
def _snake_case ( snake_case__ : List[Any] ):
A = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(snake_case__ , snake_case__ )
def _snake_case ( snake_case__ : str ):
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
A = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
A = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
A = name.replace('patch_embed' , '' )
if "pos_embed" in name:
A = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
A = name.replace('proj' , 'projection' )
if "blocks" in name:
A = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
A = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
A = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
A = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
A = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
A = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
A = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
A = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
A = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
A = name.replace(F'refinenet{layer_idx}' , F'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
A = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
A = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
A = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
A = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
A = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
A = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
A = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
A = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
A = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
A = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
A = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
A = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
A = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
A = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
A = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
A = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
A = name.replace('pretrained' , 'dpt' )
if "bn" in name:
A = name.replace('bn' , 'batch_norm' )
if "head" in name:
A = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
A = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
A = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
A = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
A = name.replace('..' , '.' )
if "stem.conv" in name:
A = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
A = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
A = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
A = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
A = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
A = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
A = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
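# Illustrative walkthrough of the renaming rules above (no extra logic): a timm
# key such as "pretrained.model.blocks.0.attn.proj.weight" first matches the
# "pretrained.model" rule, then "attn.proj", then "blocks", ending up as
# "dpt.encoder.layer.0.attention.output.dense.weight" in the HF state dict.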
def _snake_case ( snake_case__ : int , snake_case__ : Any ):
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.weight' )
A = state_dict.pop(F'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
A = in_proj_weight[: config.hidden_size, :]
A = in_proj_bias[: config.hidden_size]
A = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A = in_proj_weight[
-config.hidden_size :, :
]
A = in_proj_bias[-config.hidden_size :]
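# For reference: timm stores one fused qkv projection of shape
# (3 * hidden_size, hidden_size) per layer; the three row slices above carve it
# into query, key and value weights of shape (hidden_size, hidden_size) each,
# with the bias split into matching thirds.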
def _snake_case ( ):
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
return im
@torch.no_grad()
def _snake_case ( snake_case__ : Any , snake_case__ : Any , snake_case__ : Optional[Any] , snake_case__ : Dict , snake_case__ : Tuple ):
A , A = get_dpt_config(snake_case__ )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
A = torch.load(snake_case__ , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(snake_case__ )
# rename keys
for key in state_dict.copy().keys():
A = state_dict.pop(snake_case__ )
A = val
# read in qkv matrices
read_in_q_k_v(snake_case__ , snake_case__ )
# load HuggingFace model
A = DPTForSemanticSegmentation(snake_case__ ) if 'ade' in checkpoint_url else DPTForDepthEstimation(snake_case__ )
model.load_state_dict(snake_case__ )
model.eval()
# Check outputs on an image
A = 480 if 'ade' in checkpoint_url else 384
A = DPTImageProcessor(size=snake_case__ )
A = prepare_img()
A = image_processor(snake_case__ , return_tensors='pt' )
# forward pass
A = model(**snake_case__ ).logits if 'ade' in checkpoint_url else model(**snake_case__ ).predicted_depth
if show_prediction:
A = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=snake_case__ , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(snake_case__ ).mkdir(exist_ok=snake_case__ )
print(F'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
_lowercase = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
    )
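# Example invocation (illustrative; the script filename is hypothetical):
#   python convert_dpt_to_pytorch.py \
#       --checkpoint_url https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt \
#       --pytorch_dump_folder_path ./dpt-dump \
#       --show_prediction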
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.prepare_config_and_inputs()
( A , A , A , A , A , A , A , A , A ) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Dict = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase: Optional[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizers are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ )
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
}
_lowercase = {
'''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
'''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}
_lowercase = {
'''ctrl''': 2_56,
}
_lowercase = {
'''Pregnancy''': 16_86_29,
'''Christianity''': 76_75,
'''Explain''': 10_64_23,
'''Fitness''': 6_34_40,
'''Saving''': 6_31_63,
'''Ask''': 2_71_71,
'''Ass''': 9_59_85,
'''Joke''': 16_35_09,
'''Questions''': 4_56_22,
'''Thoughts''': 4_96_05,
'''Retail''': 5_23_42,
'''Feminism''': 16_43_38,
'''Writing''': 1_19_92,
'''Atheism''': 19_22_63,
'''Netflix''': 4_86_16,
'''Computing''': 3_96_39,
'''Opinion''': 4_32_13,
'''Alone''': 4_49_67,
'''Funny''': 5_89_17,
'''Gaming''': 4_03_58,
'''Human''': 40_88,
'''India''': 13_31,
'''Joker''': 7_71_38,
'''Diet''': 3_62_06,
'''Legal''': 1_18_59,
'''Norman''': 49_39,
'''Tip''': 7_26_89,
'''Weight''': 5_23_43,
'''Movies''': 4_62_73,
'''Running''': 2_34_25,
'''Science''': 20_90,
'''Horror''': 3_77_93,
'''Confession''': 6_05_72,
'''Finance''': 1_22_50,
'''Politics''': 1_63_60,
'''Scary''': 19_19_85,
'''Support''': 1_26_54,
'''Technologies''': 3_25_16,
'''Teenage''': 6_61_60,
'''Event''': 3_27_69,
'''Learned''': 6_74_60,
'''Notion''': 18_27_70,
'''Wikipedia''': 3_75_83,
'''Books''': 66_65,
'''Extract''': 7_60_50,
'''Confessions''': 10_27_01,
'''Conspiracy''': 7_59_32,
'''Links''': 6_36_74,
'''Narcissus''': 15_04_25,
'''Relationship''': 5_47_66,
'''Relationships''': 13_47_96,
'''Reviews''': 4_16_71,
'''News''': 42_56,
'''Translation''': 2_68_20,
'''multilingual''': 12_84_06,
}
def _snake_case ( snake_case__ : Optional[Any] ):
A = set()
A = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
A = char
A = set(pairs )
return pairs
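# Illustrative example: get_pairs(("l", "o", "w</w>")) returns
# {("l", "o"), ("o", "w</w>")} -- the symbol bigrams that the BPE merge loop
# below ranks against self.bpe_ranks.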
class lowerCAmelCase_ ( _UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase: Dict = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Union[str, Any] = CONTROL_CODES
def __init__( self : Optional[Any] ,A_ : int ,A_ : str ,A_ : int="<unk>" ,**A_ : Union[str, Any] ) -> Optional[int]:
super().__init__(unk_token=lowercase__ ,**lowercase__ )
with open(lowercase__ ,encoding='utf-8' ) as vocab_handle:
A = json.load(lowercase__ )
A = {v: k for k, v in self.encoder.items()}
with open(lowercase__ ,encoding='utf-8' ) as merges_handle:
A = merges_handle.read().split('\n' )[1:-1]
A = [tuple(merge.split() ) for merge in merges]
A = dict(zip(lowercase__ ,range(len(lowercase__ ) ) ) )
A = {}
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
return len(self.encoder )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
return dict(self.encoder ,**self.added_tokens_encoder )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> Any:
if token in self.cache:
return self.cache[token]
A = tuple(lowercase__ )
A = tuple(list(word[:-1] ) + [word[-1] + '</w>'] )
A = get_pairs(lowercase__ )
if not pairs:
return token
while True:
A = min(lowercase__ ,key=lambda A_ : self.bpe_ranks.get(lowercase__ ,float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
A = bigram
A = []
A = 0
while i < len(lowercase__ ):
try:
A = word.index(lowercase__ ,lowercase__ )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
A = j
if word[i] == first and i < len(lowercase__ ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
A = tuple(lowercase__ )
A = new_word
if len(lowercase__ ) == 1:
break
else:
A = get_pairs(lowercase__ )
A = "@@ ".join(lowercase__ )
A = word[:-4]
A = word
return word
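# Example merge trace (illustrative, assuming the merge table contains the
# pairs ("l", "o") and ("lo", "w</w>")): "low" is seeded as ("l", "o", "w</w>"),
# merged to ("lo", "w</w>"), then to ("low</w>",); after the "@@ " join and the
# trailing "</w>" strip, the cached result is "low".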
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : int ) -> Union[str, Any]:
A = []
A = re.findall(R'\S+\n?' ,lowercase__ )
for token in words:
split_tokens.extend(list(self.bpe(lowercase__ ).split(' ' ) ) )
return split_tokens
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : List[Any] ) -> Any:
return self.encoder.get(lowercase__ ,self.encoder.get(self.unk_token ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ) -> List[str]:
return self.decoder.get(lowercase__ ,self.unk_token )
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ) -> Optional[int]:
A = " ".join(lowercase__ ).replace('@@ ' ,'' ).strip()
return out_string
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Union[str, Any] = None ) -> Dict:
if not os.path.isdir(lowercase__ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
lowercase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(
lowercase__ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(lowercase__ ,'w' ,encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder ,indent=2 ,sort_keys=lowercase__ ,ensure_ascii=lowercase__ ) + '\n' )
A = 0
with open(lowercase__ ,'w' ,encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() ,key=lambda A_ : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
A = token_index
writer.write(' '.join(lowercase__ ) + '\n' )
index += 1
return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far)
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNetaDConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
_lowercase = 8
def _snake_case ( snake_case__ : Tuple , snake_case__ : Optional[int]=BITS ):
A = x.device
A = (x * 255).int().clamp(0 , 255 )
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b c h w -> b c 1 h w' )
A = ((x & mask) != 0).float()
A = rearrange(snake_case__ , 'b c d h w -> b (c d) h w' )
A = bits * 2 - 1
return bits
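# Worked example (illustrative): a pixel value of 0.5 becomes the integer
# 127 = 0b01111111, is unpacked against the powers-of-two mask into the bit
# plane [0, 1, 1, 1, 1, 1, 1, 1] along a new channel axis, and `bits * 2 - 1`
# rescales it into {-1, +1} so the diffusion model sees zero-centered inputs.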
def _snake_case ( snake_case__ : Any , snake_case__ : Any=BITS ):
A = x.device
A = (x > 0).int()
A = 2 ** torch.arange(bits - 1 , -1 , -1 , device=snake_case__ , dtype=torch.intaa )
A = rearrange(snake_case__ , 'd -> d 1 1' )
A = rearrange(snake_case__ , 'b (c d) h w -> b c d h w' , d=8 )
A = reduce(x * mask , 'b c d h w -> b c h w' , 'sum' )
return (dec / 255).clamp(0.0 , 1.0 )
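# This is the inverse of decimal_to_bits: thresholding at zero recovers the bit
# planes, the weighted sum over the powers-of-two mask rebuilds the integer,
# and dividing by 255 maps it back into [0, 1]; the round trip is lossless up
# to the 8-bit quantization.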
def _snake_case ( self : Optional[int] , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : float = 0.0 , snake_case__ : bool = True , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
if self.num_inference_steps is None:
raise ValueError(
'Number of inference steps is \'None\', you need to run \'set_timesteps\' after creating the scheduler' )
# See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
# Ideally, read the DDIM paper in detail for a full understanding.
# Notation: <variable name> -> <name in paper>
# - pred_noise_t -> e_theta(x_t, t)
# - pred_original_sample -> f_theta(x_t, t) or x_0
# - std_dev_t -> sigma_t
# - eta -> η
# - pred_sample_direction -> "direction pointing to x_t"
# - pred_prev_sample -> "x_t-1"
# 1. get previous step value (=t-1)
A = timestep - self.config.num_train_timesteps // self.num_inference_steps
# 2. compute alphas, betas
A = self.alphas_cumprod[timestep]
A = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
A = 1 - alpha_prod_t
# 3. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
# 4. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 5. compute variance: "sigma_t(η)" -> see formula (16)
# σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
A = self._get_variance(snake_case__ , snake_case__ )
A = eta * variance ** 0.5
if use_clipped_model_output:
# the model_output is always re-derived from the clipped x_0 in Glide
A = (sample - alpha_prod_t ** 0.5 * pred_original_sample) / beta_prod_t ** 0.5
# 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output
# 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
A = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction
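# The two terms above, plus the sigma_t * noise term added below when eta > 0,
# implement formulas (12) and (16) of the DDIM paper (alpha_bar = alphas_cumprod):
#   x_{t-1} = sqrt(alpha_bar_{t-1}) * x0_pred
#           + sqrt(1 - alpha_bar_{t-1} - sigma_t**2) * e_theta(x_t, t)
#           + sigma_t * noise
#   sigma_t = eta * sqrt((1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
#                        * (1 - alpha_bar_t / alpha_bar_{t-1}))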
if eta > 0:
# randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
A = model_output.device if torch.is_tensor(snake_case__ ) else 'cpu'
A = torch.randn(model_output.shape , dtype=model_output.dtype , generator=snake_case__ ).to(snake_case__ )
A = self._get_variance(snake_case__ , snake_case__ ) ** 0.5 * eta * noise
A = prev_sample + variance
if not return_dict:
return (prev_sample,)
return DDIMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
def _snake_case ( self : Dict , snake_case__ : torch.FloatTensor , snake_case__ : int , snake_case__ : torch.FloatTensor , snake_case__ : Tuple="epsilon" , snake_case__ : List[str]=None , snake_case__ : bool = True , ):
A = timestep
if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
A , A = torch.split(snake_case__ , sample.shape[1] , dim=1 )
else:
A = None
# 1. compute alphas, betas
A = self.alphas_cumprod[t]
A = self.alphas_cumprod[t - 1] if t > 0 else self.one
A = 1 - alpha_prod_t
A = 1 - alpha_prod_t_prev
# 2. compute predicted original sample from predicted noise also called
# "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
if prediction_type == "epsilon":
A = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5
elif prediction_type == "sample":
A = model_output
else:
raise ValueError(F'Unsupported prediction_type {prediction_type}.' )
# 3. Clip "predicted x_0"
A = self.bit_scale
if self.config.clip_sample:
A = torch.clamp(snake_case__ , -scale , snake_case__ )
# 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = (alpha_prod_t_prev ** 0.5 * self.betas[t]) / beta_prod_t
A = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t
# 5. Compute predicted previous sample µ_t
# See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
A = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
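# i.e. the DDPM posterior mean of formula (7), with alpha_bar = alphas_cumprod:
#   mu_t(x_t, x_0) = (sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)) * x_0
#                  + (sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)) * x_t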
# 6. Add noise
A = 0
if t > 0:
A = torch.randn(
model_output.size() , dtype=model_output.dtype , layout=model_output.layout , generator=snake_case__ ).to(model_output.device )
A = (self._get_variance(snake_case__ , predicted_variance=snake_case__ ) ** 0.5) * noise
A = pred_prev_sample + variance
if not return_dict:
return (pred_prev_sample,)
return DDPMSchedulerOutput(prev_sample=snake_case__ , pred_original_sample=snake_case__ )
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,A_ : UNetaDConditionModel ,A_ : Union[DDIMScheduler, DDPMScheduler] ,A_ : Optional[float] = 1.0 ,) -> Optional[int]:
super().__init__()
A = bit_scale
A = (
ddim_bit_scheduler_step if isinstance(A_ ,A_ ) else ddpm_bit_scheduler_step
)
self.register_modules(unet=A_ ,scheduler=A_ )
@torch.no_grad()
def __call__( self : Tuple ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 256 ,A_ : Optional[int] = 50 ,A_ : Optional[torch.Generator] = None ,A_ : Optional[int] = 1 ,A_ : Optional[str] = "pil" ,A_ : bool = True ,**A_ : Optional[Any] ,) -> Union[Tuple, ImagePipelineOutput]:
A = torch.randn(
(batch_size, self.unet.config.in_channels, height, width) ,generator=A_ ,)
A = decimal_to_bits(A_ ) * self.bit_scale
A = latents.to(self.device )
self.scheduler.set_timesteps(A_ )
for t in self.progress_bar(self.scheduler.timesteps ):
# predict the noise residual
A = self.unet(A_ ,A_ ).sample
# compute the previous noisy sample x_t -> x_t-1
A = self.scheduler.step(A_ ,A_ ,A_ ).prev_sample
A = bits_to_decimal(A_ )
if output_type == "pil":
A = self.numpy_to_pil(A_ )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=A_ )
"""simple docstring"""
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[int] ,A_ : Union[str, Any]=13 ,A_ : List[Any]=7 ,A_ : Optional[int]=True ,A_ : Dict=True ,A_ : int=True ,A_ : Tuple=True ,A_ : Any=99 ,A_ : Tuple=32 ,A_ : List[str]=5 ,A_ : Dict=4 ,A_ : Dict=37 ,A_ : Union[str, Any]="gelu" ,A_ : Union[str, Any]=0.1 ,A_ : Tuple=0.1 ,A_ : Optional[Any]=512 ,A_ : Optional[Any]=16 ,A_ : Tuple=2 ,A_ : List[Any]=0.02 ,A_ : Optional[int]=False ,A_ : int=True ,A_ : Union[str, Any]="None" ,A_ : List[Any]=3 ,A_ : int=4 ,A_ : List[str]=None ,) -> Any:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_mask
A = use_token_type_ids
A = use_labels
A = vocab_size
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_vocab_size
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = relative_attention
A = position_biased_input
A = pos_att_type
A = scope
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> str:
return DebertaConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,relative_attention=self.relative_attention ,position_biased_input=self.position_biased_input ,pos_att_type=self.pos_att_type ,)
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
A = self.get_config()
A = 300
return config
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Tuple ) -> Union[str, Any]:
self.parent.assertListEqual(list(result.loss.size() ) ,[] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : Any ,A_ : Tuple ,A_ : List[Any] ) -> int:
A = DebertaModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,token_type_ids=A_ )[0]
A = model(A_ ,token_type_ids=A_ )[0]
A = model(A_ )[0]
self.parent.assertListEqual(list(sequence_output.size() ) ,[self.batch_size, self.seq_length, self.hidden_size] )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : Dict ,A_ : int ,A_ : List[str] ,A_ : Tuple ,A_ : str ,A_ : Union[str, Any] ,A_ : Dict ) -> List[str]:
A = DebertaForMaskedLM(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Tuple ,A_ : List[str] ,A_ : Any ,A_ : Any ,A_ : Any ) -> List[str]:
A = self.num_labels
A = DebertaForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertListEqual(list(result.logits.size() ) ,[self.batch_size, self.num_labels] )
self.check_loss_output(A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : List[Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : str ,A_ : int ) -> str:
A = self.num_labels
A = DebertaForTokenClassification(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Tuple ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Union[str, Any] ) -> int:
A = DebertaForQuestionAnswering(config=A_ )
model.to(A_ )
model.eval()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,start_positions=A_ ,end_positions=A_ ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = self.prepare_config_and_inputs()
( A ) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Tuple = (
{
'''feature-extraction''': DebertaModel,
'''fill-mask''': DebertaForMaskedLM,
'''question-answering''': DebertaForQuestionAnswering,
'''text-classification''': DebertaForSequenceClassification,
'''token-classification''': DebertaForTokenClassification,
'''zero-shot''': DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
_lowerCamelCase: str = True
_lowerCamelCase: Optional[int] = False
_lowerCamelCase: Any = False
_lowerCamelCase: Any = False
_lowerCamelCase: str = False
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = DebertaModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = DebertaModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
@require_sentencepiece
@require_tokenizers
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = DebertaModel.from_pretrained('microsoft/deberta-base' )
A = torch.tensor([[0, 3_1414, 232, 328, 740, 1140, 1_2695, 69, 4_6078, 1588, 2]] )
A = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
A = model(A_ ,attention_mask=A_ )[0]
# compare the actual values for a slice.
A = torch.tensor(
[[[-0.59_86, -0.80_55, -0.84_62], [1.44_84, -0.93_48, -0.80_59], [0.31_23, 0.00_32, -1.41_31]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] ,A_ ,atol=1e-4 ) ,F'{output[:, 1:4, 1:4]}' )
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''hustvl/yolos-small''': '''https://huggingface.co/hustvl/yolos-small/resolve/main/config.json''',
# See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''yolos'''
def __init__( self : Dict ,A_ : Optional[Any]=768 ,A_ : int=12 ,A_ : List[str]=12 ,A_ : str=3072 ,A_ : Tuple="gelu" ,A_ : Dict=0.0 ,A_ : List[Any]=0.0 ,A_ : Any=0.02 ,A_ : str=1e-12 ,A_ : List[Any]=[512, 864] ,A_ : Union[str, Any]=16 ,A_ : List[str]=3 ,A_ : Optional[int]=True ,A_ : Tuple=100 ,A_ : str=True ,A_ : Optional[Any]=False ,A_ : Any=1 ,A_ : Optional[Any]=5 ,A_ : Optional[Any]=2 ,A_ : Optional[int]=5 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.1 ,**A_ : Tuple ,) -> Any:
super().__init__(**A_ )
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = intermediate_size
A = hidden_act
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = initializer_range
A = layer_norm_eps
A = image_size
A = patch_size
A = num_channels
A = qkv_bias
A = num_detection_tokens
A = use_mid_position_embeddings
A = auxiliary_loss
# Hungarian matcher
A = class_cost
A = bbox_cost
A = giou_cost
# Loss coefficients
A = bbox_loss_coefficient
A = giou_loss_coefficient
A = eos_coefficient
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Any = version.parse('''1.11''' )
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> float:
return 1e-4
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> int:
return 12
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase_ ( __A , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = BertTokenizer
_lowerCamelCase: Dict = BertTokenizerFast
_lowerCamelCase: Optional[int] = True
_lowerCamelCase: Optional[Any] = True
_lowerCamelCase: Any = filter_non_english
def _SCREAMING_SNAKE_CASE ( self : int ) -> int:
super().setUp()
A = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> List[Any]:
A = 'UNwant\u00E9d,running'
A = 'unwanted, running'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.tokenizer_class(self.vocab_file )
A = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(A_ ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(A_ ) ,[9, 6, 7, 12, 10, 11] )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = 'UNwant\u00E9d,running'
A = tokenizer.tokenize(A_ )
A = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ ,A_ )
A = tokenizer.encode(A_ ,add_special_tokens=A_ )
A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ )
self.assertListEqual(A_ ,A_ )
A = self.get_rust_tokenizer()
A = tokenizer.encode(A_ )
A = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ ,A_ )
# With lower casing
A = self.get_tokenizer(do_lower_case=A_ )
A = self.get_rust_tokenizer(do_lower_case=A_ )
A = 'UNwant\u00E9d,running'
A = tokenizer.tokenize(A_ )
A = rust_tokenizer.tokenize(A_ )
self.assertListEqual(A_ ,A_ )
A = tokenizer.encode(A_ ,add_special_tokens=A_ )
A = rust_tokenizer.encode(A_ ,add_special_tokens=A_ )
self.assertListEqual(A_ ,A_ )
A = self.get_rust_tokenizer()
A = tokenizer.encode(A_ )
A = rust_tokenizer.encode(A_ )
self.assertListEqual(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[str]:
A = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
A = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = BasicTokenizer(do_lower_case=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
A = BasicTokenizer(do_lower_case=A_ ,strip_accents=A_ )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
A = BasicTokenizer(do_lower_case=A_ ,never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = BasicTokenizer()
A = 'a\n\'ll !!to?\'d of, can\'t.'
A = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(A_ ) ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
A = {}
for i, token in enumerate(A_ ):
A = i
A = WordpieceTokenizer(vocab=A_ ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Tuple:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(t ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Tuple:
A = self.tokenizer_class.from_pretrained('bert-base-uncased' )
A = tokenizer.encode('sequence builders' ,add_special_tokens=A_ )
A = tokenizer.encode('multi-sequence build' ,add_special_tokens=A_ )
A = tokenizer.build_inputs_with_special_tokens(A_ )
A = tokenizer.build_inputs_with_special_tokens(A_ ,A_ )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[int]:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
A = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
A = tokenizer_r.encode_plus(
A_ ,return_attention_mask=A_ ,return_token_type_ids=A_ ,return_offsets_mapping=A_ ,add_special_tokens=A_ ,)
A = tokenizer_r.do_lower_case if hasattr(A_ ,'do_lower_case' ) else False
A = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
A = ['的', '人', '有']
A = ''.join(A_ )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = True
A = self.tokenizer_class.from_pretrained(A_ ,**A_ )
A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
A = tokenizer_p.encode(A_ ,add_special_tokens=A_ )
A = tokenizer_r.encode(A_ ,add_special_tokens=A_ )
A = tokenizer_r.convert_ids_to_tokens(A_ )
A = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(A_ ,A_ )
self.assertListEqual(A_ ,A_ )
A = False
A = self.rust_tokenizer_class.from_pretrained(A_ ,**A_ )
A = self.tokenizer_class.from_pretrained(A_ ,**A_ )
A = tokenizer_r.encode(A_ ,add_special_tokens=A_ )
A = tokenizer_p.encode(A_ ,add_special_tokens=A_ )
A = tokenizer_r.convert_ids_to_tokens(A_ )
A = tokenizer_p.convert_ids_to_tokens(A_ )
# it is expected that only the first Chinese character is not preceded by "##".
A = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(A_ )
]
self.assertListEqual(A_ ,A_ )
self.assertListEqual(A_ ,A_ )
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
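# Hedged illustration (not part of the original test file): every method above repeats the
# same scheduler-swap pattern -- rebuild a compatible scheduler from the current scheduler's
# config and assign it back before running. `_demo_swap_scheduler` is a made-up name; the
# checkpoint id is the tiny test checkpoint this class already uses.
def _demo_swap_scheduler():
    pipe = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
        'hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline' ,provider='CPUExecutionProvider' )
    pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
    return pipe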
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {'''configuration_fnet''': ['''FNET_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''FNetConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''FNetTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''FNetTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FNET_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''FNetForMaskedLM''',
'''FNetForMultipleChoice''',
'''FNetForNextSentencePrediction''',
'''FNetForPreTraining''',
'''FNetForQuestionAnswering''',
'''FNetForSequenceClassification''',
'''FNetForTokenClassification''',
'''FNetLayer''',
'''FNetModel''',
'''FNetPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_fnet import FNET_PRETRAINED_CONFIG_ARCHIVE_MAP, FNetConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet import FNetTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_fnet_fast import FNetTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_fnet import (
FNET_PRETRAINED_MODEL_ARCHIVE_LIST,
FNetForMaskedLM,
FNetForMultipleChoice,
FNetForNextSentencePrediction,
FNetForPreTraining,
FNetForQuestionAnswering,
FNetForSequenceClassification,
FNetForTokenClassification,
FNetLayer,
FNetModel,
FNetPreTrainedModel,
)
else:
import sys
    _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
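# Hedged, standalone sketch of the deferred-import idea behind `_LazyModule` above, using
# importlib directly; 'json' is just a stand-in for a heavy optional dependency, and
# `_demo_lazy_import` is a made-up name.
def _demo_lazy_import():
    import importlib
    loaded = importlib.import_module('json' )  # deferred until this call actually runs
    return loaded.dumps({'lazy': True} )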
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( BaseImageProcessor ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
        return semantic_segmentation
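# Hedged, self-contained sketch of the transform order `preprocess` applies above
# (resize -> center crop -> rescale -> normalize). The fixed sizes stand in for the class
# defaults (shortest edge 256, crop 224x224, factor 1/255), and `_demo_transform_order`
# is a made-up name.
def _demo_transform_order(image: np.ndarray ) -> np.ndarray:
    image = resize(image ,size=(256, 256) ,resample=PILImageResampling.BILINEAR )
    image = center_crop(image ,size=(224, 224) )
    image = rescale(image ,scale=1 / 255 )
    return normalize(image ,mean=IMAGENET_STANDARD_MEAN ,std=IMAGENET_STANDARD_STD )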
"""simple docstring"""
import argparse
import glob
import importlib.util
import os
import re
import black
from doc_builder.style_doc import style_docstrings_in_code
# All paths are set with the intent that you should run this script from the root of the repo with the command
# python utils/check_copies.py
_lowercase = '''src/diffusers'''
_lowercase = '''.'''
# This is to make sure the diffusers module imported is the one in the repo.
_lowercase = importlib.util.spec_from_file_location(
"""diffusers""",
os.path.join(DIFFUSERS_PATH, """__init__.py"""),
submodule_search_locations=[DIFFUSERS_PATH],
)
_lowercase = spec.loader.load_module()
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : List[str] ):
return line.startswith(lowerCAmelCase__ ) or len(lowerCAmelCase__ ) <= 1 or re.search(r'^\s*\)(\s*->.*:|:)\s*$' , lowerCAmelCase__ ) is not None
def _snake_case ( snake_case__ : Optional[int] ):
A = object_name.split('.' )
A = 0
# First let's find the module where our object lives.
A = parts[i]
while i < len(lowerCAmelCase__ ) and not os.path.isfile(os.path.join(lowerCAmelCase__ , F'{module}.py' ) ):
i += 1
if i < len(lowerCAmelCase__ ):
A = os.path.join(lowerCAmelCase__ , parts[i] )
if i >= len(lowerCAmelCase__ ):
raise ValueError(F'`object_name` should begin with the name of a module of diffusers but got {object_name}.' )
with open(os.path.join(lowerCAmelCase__ , F'{module}.py' ) , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.readlines()
# Now let's find the class / func in the code!
A = ''
A = 0
for name in parts[i + 1 :]:
while (
line_index < len(lowerCAmelCase__ ) and re.search(rF'^{indent}(class|def)\s+{name}(\(|\:)' , lines[line_index] ) is None
):
line_index += 1
indent += " "
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
raise ValueError(F' {object_name} does not match any function or class in {module}.' )
# We found the beginning of the class / func, now let's find the end (when the indent diminishes).
A = line_index
while line_index < len(lowerCAmelCase__ ) and _should_continue(lines[line_index] , lowerCAmelCase__ ):
line_index += 1
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
return "".join(lowerCAmelCase__ )
_lowercase = re.compile(r"""^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)""")
_lowercase = re.compile(r"""^\s*(\S+)->(\S+)(\s+.*|$)""")
_lowercase = re.compile(r"""<FILL\s+[^>]*>""")
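# Hedged illustration of the "Copied from" annotation the first pattern above targets. The
# object path and the `with Old->New` rewrite are invented examples, and the regex is
# re-declared locally so the sketch runs standalone.
def _demo_copied_from_match():
    pattern = re.compile(r'^(\s*)#\s*Copied from\s+diffusers\.(\S+\.\S+)\s*($|\S.*$)' )
    line = '    # Copied from diffusers.models.foo.Bar with Bar->Baz'
    return pattern.search(line ).groups()  # ('    ', 'models.foo.Bar', 'with Bar->Baz')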
def _snake_case ( snake_case__ : Any ):
A = code.split('\n' )
A = 0
while idx < len(lowerCAmelCase__ ) and len(lines[idx] ) == 0:
idx += 1
if idx < len(lowerCAmelCase__ ):
return re.search(r'^(\s*)\S' , lines[idx] ).groups()[0]
return ""
def _snake_case ( snake_case__ : Optional[int] ):
A = len(get_indent(lowerCAmelCase__ ) ) > 0
if has_indent:
A = F'class Bla:\n{code}'
A = black.Mode(target_versions={black.TargetVersion.PYaa} , line_length=119 , preview=lowerCAmelCase__ )
A = black.format_str(lowerCAmelCase__ , mode=lowerCAmelCase__ )
A , A = style_docstrings_in_code(lowerCAmelCase__ )
return result[len('class Bla:\n' ) :] if has_indent else result
def _snake_case ( snake_case__ : str , snake_case__ : str=False ):
with open(lowerCAmelCase__ , 'r' , encoding='utf-8' , newline='\n' ) as f:
A = f.readlines()
A = []
A = 0
# Not a for loop cause `lines` is going to change (if `overwrite=True`).
while line_index < len(lowerCAmelCase__ ):
A = _re_copy_warning.search(lines[line_index] )
if search is None:
line_index += 1
continue
# There is some copied code here, let's retrieve the original.
A , A , A = search.groups()
A = find_code_in_diffusers(lowerCAmelCase__ )
A = get_indent(lowerCAmelCase__ )
A = line_index + 1 if indent == theoretical_indent else line_index + 2
A = theoretical_indent
A = start_index
# Loop to check the observed code, stop when indentation diminishes or if we see a End copy comment.
A = True
while line_index < len(lowerCAmelCase__ ) and should_continue:
line_index += 1
if line_index >= len(lowerCAmelCase__ ):
break
A = lines[line_index]
A = _should_continue(lowerCAmelCase__ , lowerCAmelCase__ ) and re.search(F'^{indent}# End copy' , lowerCAmelCase__ ) is None
# Clean up empty lines at the end (if any).
while len(lines[line_index - 1] ) <= 1:
line_index -= 1
A = lines[start_index:line_index]
A = ''.join(lowerCAmelCase__ )
# Remove any nested `Copied from` comments to avoid circular copies
A = [line for line in theoretical_code.split('\n' ) if _re_copy_warning.search(lowerCAmelCase__ ) is None]
A = '\n'.join(lowerCAmelCase__ )
# Before comparing, use the `replace_pattern` on the original code.
if len(lowerCAmelCase__ ) > 0:
A = replace_pattern.replace('with' , '' ).split(',' )
A = [_re_replace_pattern.search(lowerCAmelCase__ ) for p in patterns]
for pattern in patterns:
if pattern is None:
continue
A , A , A = pattern.groups()
A = re.sub(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
if option.strip() == "all-casing":
A = re.sub(obja.lower() , obja.lower() , lowerCAmelCase__ )
A = re.sub(obja.upper() , obja.upper() , lowerCAmelCase__ )
# Blackify after replacement. To be able to do that, we need the header (class or function definition)
# from the previous line
A = blackify(lines[start_index - 1] + theoretical_code )
A = theoretical_code[len(lines[start_index - 1] ) :]
# Test for a diff and act accordingly.
if observed_code != theoretical_code:
diffs.append([object_name, start_index] )
if overwrite:
A = lines[:start_index] + [theoretical_code] + lines[line_index:]
A = start_index + 1
if overwrite and len(lowerCAmelCase__ ) > 0:
# Warn the user a file has been modified.
print(F'Detected changes, rewriting {filename}.' )
with open(lowerCAmelCase__ , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCAmelCase__ )
return diffs
def _snake_case ( snake_case__ : Optional[int] = False ):
A = glob.glob(os.path.join(lowerCAmelCase__ , '**/*.py' ) , recursive=lowerCAmelCase__ )
A = []
for filename in all_files:
A = is_copy_consistent(lowerCAmelCase__ , lowerCAmelCase__ )
diffs += [F'- {filename}: copy does not match {d[0]} at line {d[1]}' for d in new_diffs]
if not overwrite and len(lowerCAmelCase__ ) > 0:
A = '\n'.join(lowerCAmelCase__ )
raise Exception(
'Found the following copy inconsistencies:\n'
+ diff
+ '\nRun `make fix-copies` or `python utils/check_copies.py --fix_and_overwrite` to fix them.' )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument("""--fix_and_overwrite""", action="""store_true""", help="""Whether to fix inconsistencies.""")
_lowercase = parser.parse_args()
    check_copies(args.fix_and_overwrite)
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
A = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
A = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
A = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
A = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A = os.path.abspath(snake_case__ )
A = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A = TransfoXLConfig()
else:
A = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TransfoXLLMHeadModel(snake_case__ )
A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
A = os.path.join(snake_case__ , snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
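# Hedged example invocation (the script name and every path below are placeholders):
#
#   python convert_transfo_xl_checkpoint.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./model.ckpt \
#       --transfo_xl_config_file ./config.json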
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
'''An optional config json file corresponding to the pre-trained BERT model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
    )
"""simple docstring"""
from random import randint, random
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : int , snake_case__ : int , snake_case__ : str = False , snake_case__ : Any = False , snake_case__ : Dict = 5 , ):
A = [[-1] * number_of_cells] # Create a highway without any car
A = 0
A = max(lowerCAmelCase_ , 0 )
while i < number_of_cells:
A = (
randint(0 , lowerCAmelCase_ ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Union[str, Any] ):
A = 0
A = highway_now[car_index + 1 :]
for cell in range(len(lowerCAmelCase_ ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # The car is near the end of the highway: wrap around and keep counting
return distance + get_distance(lowerCAmelCase_ , -1 )
def _snake_case ( snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] ):
A = len(lowerCAmelCase_ )
    # Before any calculations, the highway is empty
A = [-1] * number_of_cells
for car_index in range(lowerCAmelCase_ ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
A = min(highway_now[car_index] + 1 , lowerCAmelCase_ )
            # Number of empty cells before the next car
A = get_distance(lowerCAmelCase_ , lowerCAmelCase_ ) - 1
# We can't have the car causing an accident
A = min(next_highway[car_index] , lowerCAmelCase_ )
if random() < probability:
# Randomly, a driver will slow down
A = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _snake_case ( snake_case__ : int , snake_case__ : Optional[int] , snake_case__ : Any , snake_case__ : Tuple ):
A = len(highway[0] )
for i in range(lowerCAmelCase_ ):
A = update(highway[i] , lowerCAmelCase_ , lowerCAmelCase_ )
A = [-1] * number_of_cells
for car_index in range(lowerCAmelCase_ ):
A = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
A = (car_index + speed) % number_of_cells
# Commit the change of position
A = speed
highway.append(lowerCAmelCase_ )
return highway
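# Hedged, self-contained walk-through of one Nagel-Schreckenberg step with probability 0,
# so the outcome is deterministic: accelerate, brake down to the gap, then move. It does not
# depend on the helpers above, and `_demo_single_step` is a made-up name.
def _demo_single_step(max_speed: int = 5 ) -> list:
    highway_now = [0, -1, -1, 2, -1, -1, -1, -1]  # two cars, speeds 0 and 2
    speeds = []
    for i, speed in enumerate(highway_now ):
        if speed == -1:
            speeds.append(-1 )
            continue
        # distance (minus one) to the next occupied cell, scanning around the loop
        gap = next(d for d in range(1 ,len(highway_now ) ) if highway_now[(i + d) % len(highway_now )] != -1 ) - 1
        speeds.append(min(speed + 1 ,max_speed ,gap ) )
    next_highway = [-1] * len(highway_now )
    for i, speed in enumerate(speeds ):
        if speed != -1:
            next_highway[(i + speed) % len(highway_now )] = speed
    return next_highway  # [-1, 1, -1, -1, -1, -1, 3, -1]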
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
                    self.graph[u].remove(_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
            # every vertex has up to 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
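# Hedged, standalone sketch of the iterative DFS the class above implements with an explicit
# stack; the adjacency dict mirrors its {node: [[weight, neighbour], ...]} layout and
# `_demo_iterative_dfs` is a made-up name.
def _demo_iterative_dfs():
    graph = {0: [[1, 1], [1, 2]], 1: [[1, 2]], 2: []}
    stack, visited = [0], [0]
    while stack:
        node = stack.pop()
        for _, neighbour in graph[node]:
            if neighbour not in visited:
                stack.append(neighbour )
                visited.append(neighbour )
    return visited  # [0, 1, 2]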
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
        # check if node u already exists
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
# if there already is a edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
                    self.graph[u].remove(_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
                    self.graph[v].remove(_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
            # every vertex has up to 102 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
        return end - begin
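# Hedged, standalone companion sketch for the BFS methods above, using collections.deque the
# same way; the adjacency layout matches the classes' {node: [[weight, neighbour], ...]}
# convention and `_demo_iterative_bfs` is a made-up name.
def _demo_iterative_bfs():
    graph = {0: [[1, 1], [1, 2]], 1: [[1, 2]], 2: []}
    queue, visited = deque([0] ), [0]
    while queue:
        node = queue.popleft()
        for _, neighbour in graph[node]:
            if neighbour not in visited:
                queue.append(neighbour )
                visited.append(neighbour )
    return visited  # [0, 1, 2]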
"""simple docstring"""
import unittest
from dataclasses import dataclass
import pytest
from accelerate.commands.config.config_args import SageMakerConfig
from accelerate.utils import ComputeEnvironment
from accelerate.utils.launch import _convert_nargs_to_dict
@dataclass
class lowerCAmelCase_ ( SageMakerConfig ):
'''simple docstring'''
_lowerCamelCase: Any = ComputeEnvironment.AMAZON_SAGEMAKER
_lowerCamelCase: List[str] = True
_lowerCamelCase: int = '''ml.p3.2xlarge'''
_lowerCamelCase: Optional[int] = '''accelerate_sagemaker_execution_role'''
_lowerCamelCase: int = '''hf-sm'''
_lowerCamelCase: Dict = '''us-east-1'''
_lowerCamelCase: int = 1
_lowerCamelCase: str = '''accelerate-sagemaker-1'''
_lowerCamelCase: Union[str, Any] = '''1.6'''
_lowerCamelCase: Union[str, Any] = '''4.4'''
_lowerCamelCase: Any = '''train.py'''
_lowerCamelCase: str = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''False''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
_lowerCamelCase: Union[str, Any] = [
'''--model_name_or_path''',
'''bert''',
'''--do_train''',
'''--do_test''',
'''False''',
'''--do_predict''',
'''--epochs''',
'''3''',
'''--learning_rate''',
'''5e-5''',
'''--max_steps''',
'''50.5''',
]
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
# If no defaults are changed, `to_kwargs` returns an empty dict.
A = _convert_nargs_to_dict(MockLaunchConfig.success_training_script_args )
        assert isinstance(converted_args['model_name_or_path'] ,str )
        assert isinstance(converted_args['do_train'] ,bool )
        assert isinstance(converted_args['epochs'] ,int )
        assert isinstance(converted_args['learning_rate'] ,float )
        assert isinstance(converted_args['max_steps'] ,float )
        with pytest.raises(ValueError ):
            _convert_nargs_to_dict(MockLaunchConfig.fail_training_script_args )
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
    A = snake_case__.strip().strip('/' )  # Remove leading/trailing whitespace & slashes
    if A.count('/' ) != 1:
        A = F'{snake_case__} is not a valid Open Library olid'
        raise ValueError(A )
    return requests.get(F'https://openlibrary.org/{A}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
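# Hedged illustration of the key-remapping step above: a {old_key: new_key} mapping plus a
# dict comprehension renames and filters in one pass. The data and `_demo_remap_keys` name
# are made up.
def _demo_remap_keys():
    raw = {'title': 'Matilda', 'publish_date': '1988', 'number_of_pages': 240, 'ignored': True}
    desired = {'title': 'Title', 'publish_date': 'Publish date', 'number_of_pages': 'Number of pages:'}
    return {better_key: raw[key] for key, better_key in desired.items()}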
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
            print(F"""Sorry, there are no results for ISBN: {isbn}.""")
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_lowercase = {
'configuration_lxmert': ['LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'LxmertConfig'],
'tokenization_lxmert': ['LxmertTokenizer'],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['LxmertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'LxmertEncoder',
'LxmertForPreTraining',
'LxmertForQuestionAnswering',
'LxmertModel',
'LxmertPreTrainedModel',
'LxmertVisualFeatureEncoder',
'LxmertXLayer',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFLxmertForPreTraining',
'TFLxmertMainLayer',
'TFLxmertModel',
'TFLxmertPreTrainedModel',
'TFLxmertVisualFeatureEncoder',
]
if TYPE_CHECKING:
from .configuration_lxmert import LXMERT_PRETRAINED_CONFIG_ARCHIVE_MAP, LxmertConfig
from .tokenization_lxmert import LxmertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_lxmert_fast import LxmertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_lxmert import (
LxmertEncoder,
LxmertForPreTraining,
LxmertForQuestionAnswering,
LxmertModel,
LxmertPreTrainedModel,
LxmertVisualFeatureEncoder,
LxmertXLayer,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_lxmert import (
TF_LXMERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLxmertForPreTraining,
TFLxmertMainLayer,
TFLxmertModel,
TFLxmertPreTrainedModel,
TFLxmertVisualFeatureEncoder,
)
else:
import sys
    _lowercase = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
    _lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
from __future__ import annotations
def _snake_case ( snake_case__ : list[int] ):
    if len(snake_case__ ) < 2:
        raise ValueError('Monogons and Digons are not polygons in the Euclidean space' )
    if any(i <= 0 for i in snake_case__ ):
        raise ValueError('All values must be greater than 0' )
    A = sorted(snake_case__ )
    return A[-1] < sum(A[:-1] )
if __name__ == "__main__":
import doctest
    doctest.testmod()
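# Hedged worked example of the check above: after sorting, the longest side must be strictly
# shorter than the sum of the rest. Standalone, with made-up side lengths;
# `_demo_polygon_check` is a made-up name.
def _demo_polygon_check() -> tuple:
    valid = sorted([3, 4, 5] )
    degenerate = sorted([1, 2, 3] )
    return valid[-1] < sum(valid[:-1] ), degenerate[-1] < sum(degenerate[:-1] )  # (True, False)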
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig, SwinvaForImageClassification
def _snake_case ( snake_case__ : int ):
A = SwinvaConfig()
A = swinva_name.split('_' )
A = name_split[1]
if "to" in name_split[3]:
A = int(name_split[3][-3:] )
else:
A = int(name_split[3] )
if "to" in name_split[2]:
A = int(name_split[2][-2:] )
else:
A = int(name_split[2][6:] )
if model_size == "tiny":
A = 96
A = (2, 2, 6, 2)
A = (3, 6, 12, 24)
elif model_size == "small":
A = 96
A = (2, 2, 18, 2)
A = (3, 6, 12, 24)
elif model_size == "base":
A = 128
A = (2, 2, 18, 2)
A = (4, 8, 16, 32)
else:
A = 192
A = (2, 2, 18, 2)
A = (6, 12, 24, 48)
if "to" in swinva_name:
A = (12, 12, 12, 6)
if ("22k" in swinva_name) and ("to" not in swinva_name):
A = 2_1841
A = 'huggingface/label-files'
A = 'imagenet-22k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
else:
A = 1000
A = 'huggingface/label-files'
A = 'imagenet-1k-id2label.json'
A = json.load(open(hf_hub_download(snake_case__ , snake_case__ , repo_type='dataset' ) , 'r' ) )
A = {int(snake_case__ ): v for k, v in idalabel.items()}
A = idalabel
A = {v: k for k, v in idalabel.items()}
A = img_size
A = num_classes
A = embed_dim
A = depths
A = num_heads
A = window_size
return config
def _snake_case ( snake_case__ : List[Any] ):
if "patch_embed.proj" in name:
A = name.replace('patch_embed.proj' , 'embeddings.patch_embeddings.projection' )
if "patch_embed.norm" in name:
A = name.replace('patch_embed.norm' , 'embeddings.norm' )
if "layers" in name:
A = 'encoder.' + name
if "attn.proj" in name:
A = name.replace('attn.proj' , 'attention.output.dense' )
if "attn" in name:
A = name.replace('attn' , 'attention.self' )
if "norm1" in name:
A = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name:
A = name.replace('norm2' , 'layernorm_after' )
if "mlp.fc1" in name:
A = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
A = name.replace('mlp.fc2' , 'output.dense' )
if "q_bias" in name:
A = name.replace('q_bias' , 'query.bias' )
if "k_bias" in name:
A = name.replace('k_bias' , 'key.bias' )
if "v_bias" in name:
A = name.replace('v_bias' , 'value.bias' )
if "cpb_mlp" in name:
A = name.replace('cpb_mlp' , 'continuous_position_bias_mlp' )
if name == "norm.weight":
A = 'layernorm.weight'
if name == "norm.bias":
A = 'layernorm.bias'
if "head" in name:
A = name.replace('head' , 'classifier' )
else:
A = 'swinv2.' + name
return name
def _snake_case ( snake_case__ : List[Any] , snake_case__ : List[Any] ):
for key in orig_state_dict.copy().keys():
A = orig_state_dict.pop(snake_case__ )
if "mask" in key:
continue
elif "qkv" in key:
A = key.split('.' )
A = int(key_split[1] )
A = int(key_split[3] )
A = model.swinva.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size
if "weight" in key:
A = val[:dim, :]
A = val[dim : dim * 2, :]
A = val[-dim:, :]
else:
A = val[:dim]
A = val[
dim : dim * 2
]
A = val[-dim:]
else:
A = val
return orig_state_dict
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Tuple ):
A = timm.create_model(snake_case__ , pretrained=snake_case__ )
timm_model.eval()
A = get_swinva_config(snake_case__ )
A = SwinvaForImageClassification(snake_case__ )
model.eval()
A = convert_state_dict(timm_model.state_dict() , snake_case__ )
model.load_state_dict(snake_case__ )
A = 'http://images.cocodataset.org/val2017/000000039769.jpg'
A = AutoImageProcessor.from_pretrained('microsoft/{}'.format(swinva_name.replace('_' , '-' ) ) )
A = Image.open(requests.get(snake_case__ , stream=snake_case__ ).raw )
A = image_processor(images=snake_case__ , return_tensors='pt' )
A = timm_model(inputs['pixel_values'] )
A = model(**snake_case__ ).logits
assert torch.allclose(snake_case__ , snake_case__ , atol=1e-3 )
print(F'Saving model {swinva_name} to {pytorch_dump_folder_path}' )
model.save_pretrained(snake_case__ )
print(F'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(snake_case__ )
model.push_to_hub(
repo_path_or_name=Path(snake_case__ , snake_case__ ) , organization='nandwalritik' , commit_message='Add model' , )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
_lowercase = parser.parse_args()
    convert_swinva_checkpoint(args.swinva_name, args.pytorch_dump_folder_path)
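# Hedged example invocation (the script name and the output path are placeholders; the model
# name is the argparse default above):
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2-tiny-pt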
"""simple docstring"""
import logging
import os
import sys
import warnings
from dataclasses import dataclass, field
from random import randint
from typing import Optional
import datasets
import evaluate
import numpy as np
from datasets import DatasetDict, load_dataset
import transformers
from transformers import (
AutoConfig,
AutoFeatureExtractor,
AutoModelForAudioClassification,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint
from transformers.utils import check_min_version, send_example_telemetry
from transformers.utils.versions import require_version
_lowercase = logging.getLogger(__name__)
# Will error if the minimal version of Transformers is not installed. Remove at your own risk.
check_min_version('''4.31.0''')
require_version('''datasets>=1.14.0''', '''To fix: pip install -r examples/pytorch/audio-classification/requirements.txt''')
def _snake_case ( snake_case__ : Optional[int] , snake_case__ : Dict , snake_case__ : List[Any] = 1_6000 ):
A = int(round(sample_rate * max_length ) )
if len(snake_case__ ) <= sample_length:
return wav
A = randint(0 , len(snake_case__ ) - sample_length - 1 )
return wav[random_offset : random_offset + sample_length]
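# Hedged, standalone check of the cropping logic above: a 2 s clip at 16 kHz cut to a 1 s
# budget keeps exactly sample_rate * max_length samples. `_demo_random_subsample` is a
# made-up name.
def _demo_random_subsample() -> int:
    wav = np.zeros(3_2000 )  # 2 s at 16 kHz
    sample_length = int(round(1_6000 * 1.0 ) )  # sample_rate * max_length, as above
    random_offset = randint(0 ,len(wav ) - sample_length - 1 )
    return len(wav[random_offset : random_offset + sample_length] )  # 16000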
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: Optional[str] = field(default=snake_case__ , metadata={'''help''': '''Name of a dataset from the datasets package'''} )
_lowerCamelCase: Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''The configuration name of the dataset to use (via the datasets library).'''} )
_lowerCamelCase: Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''A file containing the training audio paths and labels.'''} )
_lowerCamelCase: Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''A file containing the validation audio paths and labels.'''} )
_lowerCamelCase: str = field(
default='''train''' , metadata={
'''help''': '''The name of the training data set split to use (via the datasets library). Defaults to \'train\''''
} , )
    _lowerCamelCase: str = field(
        default='''validation''' , metadata={
            '''help''': (
                '''The name of the validation data set split to use (via the datasets library). Defaults to \'validation\''''
            )
        } , )
_lowerCamelCase: str = field(
default='''audio''' , metadata={'''help''': '''The name of the dataset column containing the audio data. Defaults to \'audio\''''} , )
_lowerCamelCase: str = field(
default='''label''' , metadata={'''help''': '''The name of the dataset column containing the labels. Defaults to \'label\''''} )
_lowerCamelCase: Optional[int] = field(
default=snake_case__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of training examples to this '''
'''value if set.'''
)
} , )
_lowerCamelCase: Optional[int] = field(
default=snake_case__ , metadata={
'''help''': (
'''For debugging purposes or quicker training, truncate the number of evaluation examples to this '''
'''value if set.'''
)
} , )
_lowerCamelCase: float = field(
default=20 , metadata={'''help''': '''Audio clips will be randomly cut to this length during training if the value is set.'''} , )
@dataclass
class lowerCAmelCase_ :
'''simple docstring'''
_lowerCamelCase: str = field(
default='''facebook/wav2vec2-base''' , metadata={'''help''': '''Path to pretrained model or model identifier from huggingface.co/models'''} , )
_lowerCamelCase: Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''Pretrained config name or path if not the same as model_name'''} )
_lowerCamelCase: Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''Where do you want to store the pretrained models downloaded from the Hub'''} )
_lowerCamelCase: str = field(
default='''main''' , metadata={'''help''': '''The specific model version to use (can be a branch name, tag name or commit id).'''} , )
_lowerCamelCase: Optional[str] = field(
default=snake_case__ , metadata={'''help''': '''Name or path of preprocessor config.'''} )
_lowerCamelCase: bool = field(
default=snake_case__ , metadata={'''help''': '''Whether to freeze the feature encoder layers of the model.'''} )
_lowerCamelCase: bool = field(
default=snake_case__ , metadata={'''help''': '''Whether to generate an attention mask in the feature extractor.'''} )
_lowerCamelCase: bool = field(
default=snake_case__ , metadata={
'''help''': (
'''Will use the token generated when running `huggingface-cli login` (necessary to use this script '''
'''with private models).'''
)
} , )
_lowerCamelCase: Optional[bool] = field(
default=snake_case__ , metadata={'''help''': '''Whether to freeze the feature extractor layers of the model.'''} )
_lowerCamelCase: bool = field(
default=snake_case__ , metadata={'''help''': '''Will enable to load a pretrained model whose head dimensions are different.'''} , )
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
if not self.freeze_feature_extractor and self.freeze_feature_encoder:
warnings.warn(
'The argument `--freeze_feature_extractor` is deprecated and '
                'will be removed in a future version. Use `--freeze_feature_encoder` '
                'instead. Setting `freeze_feature_encoder==True`.' ,FutureWarning ,)
if self.freeze_feature_extractor and not self.freeze_feature_encoder:
raise ValueError(
'The argument `--freeze_feature_extractor` is deprecated and '
                'should not be used in combination with `--freeze_feature_encoder`. '
'Only make use of `--freeze_feature_encoder`.' )
def _snake_case ( ):
# See all possible arguments in src/transformers/training_args.py
# or by passing the --help flag to this script.
# We now keep distinct sets of args, for a cleaner separation of concerns.
A = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith('.json' ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
A = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
A = parser.parse_args_into_dataclasses()
# Sending telemetry. Tracking the example usage helps us better allocate resources to maintain them. The
# information sent is the one passed as arguments along with your Python/PyTorch versions.
send_example_telemetry('run_audio_classification' , snake_case__ , snake_case__ )
# Setup logging
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s' , datefmt='%m/%d/%Y %H:%M:%S' , handlers=[logging.StreamHandler(sys.stdout )] , )
if training_args.should_log:
# The default of training_args.log_level is passive, so we set log level at info here to have that default.
transformers.utils.logging.set_verbosity_info()
A = training_args.get_process_log_level()
logger.setLevel(snake_case__ )
transformers.utils.logging.set_verbosity(snake_case__ )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Log on each process the small summary:
logger.warning(
F'Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu} '
+ F'distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}' )
logger.info(F'Training/evaluation parameters {training_args}' )
# Set seed before initializing model.
set_seed(training_args.seed )
# Detecting last checkpoint.
A = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
A = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F'Output directory ({training_args.output_dir}) already exists and is not empty. '
'Use --overwrite_output_dir to train from scratch.' )
elif last_checkpoint is not None and training_args.resume_from_checkpoint is None:
logger.info(
F'Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change '
'the `--output_dir` or add `--overwrite_output_dir` to train from scratch.' )
# Initialize our dataset and prepare it for the audio classification task.
A = DatasetDict()
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.train_split_name , use_auth_token=True if model_args.use_auth_token else None , )
A = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=data_args.eval_split_name , use_auth_token=True if model_args.use_auth_token else None , )
if data_args.audio_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--audio_column_name {data_args.audio_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'Make sure to set `--audio_column_name` to the correct audio column - one of '
F'{", ".join(raw_datasets["train"].column_names )}.' )
if data_args.label_column_name not in raw_datasets["train"].column_names:
raise ValueError(
F'--label_column_name {data_args.label_column_name} not found in dataset \'{data_args.dataset_name}\'. '
'Make sure to set `--label_column_name` to the correct text column - one of '
F'{", ".join(raw_datasets["train"].column_names )}.' )
# Setting `return_attention_mask=True` is the way to get a correctly masked mean-pooling over
# transformer outputs in the classifier, but it doesn't always lead to better accuracy
A = AutoFeatureExtractor.from_pretrained(
model_args.feature_extractor_name or model_args.model_name_or_path , return_attention_mask=model_args.attention_mask , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
# `datasets` takes care of automatically loading and resampling the audio,
# so we just need to set the correct target sampling rate.
A = raw_datasets.cast_column(
data_args.audio_column_name , datasets.features.Audio(sampling_rate=feature_extractor.sampling_rate ) )
A = feature_extractor.model_input_names[0]
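# train_transforms randomly subsamples each clip to at most data_args.max_length_seconds as a light augmentation; val_transforms below feeds the full waveform.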
def train_transforms(snake_case__ : int ):
A = []
for audio in batch[data_args.audio_column_name]:
A = random_subsample(
audio['array'] , max_length=data_args.max_length_seconds , sample_rate=feature_extractor.sampling_rate )
subsampled_wavs.append(snake_case__ )
A = feature_extractor(snake_case__ , sampling_rate=feature_extractor.sampling_rate )
A = {model_input_name: inputs.get(snake_case__ )}
A = list(batch[data_args.label_column_name] )
return output_batch
def val_transforms(snake_case__ : List[Any] ):
A = [audio['array'] for audio in batch[data_args.audio_column_name]]
A = feature_extractor(snake_case__ , sampling_rate=feature_extractor.sampling_rate )
A = {model_input_name: inputs.get(snake_case__ )}
A = list(batch[data_args.label_column_name] )
return output_batch
# Prepare label mappings.
# We'll include these in the model's config to get human readable labels in the Inference API.
A = raw_datasets['train'].features[data_args.label_column_name].names
A = {}, {}
for i, label in enumerate(snake_case__ ):
A = str(snake_case__ )
A = label
# Load the accuracy metric from the datasets package
A = evaluate.load('accuracy' )
# Define our compute_metrics function. It takes an `EvalPrediction` object (a namedtuple with
# `predictions` and `label_ids` fields) and has to return a dictionary string to float.
def compute_metrics(snake_case__ : Dict ):
A = np.argmax(eval_pred.predictions , axis=1 )
return metric.compute(predictions=snake_case__ , references=eval_pred.label_ids )
A = AutoConfig.from_pretrained(
model_args.config_name or model_args.model_name_or_path , num_labels=len(snake_case__ ) , labelaid=snake_case__ , idalabel=snake_case__ , finetuning_task='audio-classification' , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
A = AutoModelForAudioClassification.from_pretrained(
model_args.model_name_or_path , from_tf=bool('.ckpt' in model_args.model_name_or_path ) , config=snake_case__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , ignore_mismatched_sizes=model_args.ignore_mismatched_sizes , )
# freeze the convolutional waveform encoder
if model_args.freeze_feature_encoder:
model.freeze_feature_encoder()
if training_args.do_train:
if data_args.max_train_samples is not None:
A = (
raw_datasets['train'].shuffle(seed=training_args.seed ).select(range(data_args.max_train_samples ) )
)
# Set the training transforms
raw_datasets["train"].set_transform(snake_case__ , output_all_columns=snake_case__ )
if training_args.do_eval:
if data_args.max_eval_samples is not None:
A = (
raw_datasets['eval'].shuffle(seed=training_args.seed ).select(range(data_args.max_eval_samples ) )
)
# Set the validation transforms
raw_datasets["eval"].set_transform(snake_case__ , output_all_columns=snake_case__ )
# Initialize our trainer
A = Trainer(
model=snake_case__ , args=snake_case__ , train_dataset=raw_datasets['train'] if training_args.do_train else None , eval_dataset=raw_datasets['eval'] if training_args.do_eval else None , compute_metrics=snake_case__ , tokenizer=snake_case__ , )
# Training
if training_args.do_train:
A = None
if training_args.resume_from_checkpoint is not None:
A = training_args.resume_from_checkpoint
elif last_checkpoint is not None:
A = last_checkpoint
A = trainer.train(resume_from_checkpoint=snake_case__ )
trainer.save_model()
trainer.log_metrics('train' , train_result.metrics )
trainer.save_metrics('train' , train_result.metrics )
trainer.save_state()
# Evaluation
if training_args.do_eval:
A = trainer.evaluate()
trainer.log_metrics('eval' , snake_case__ )
trainer.save_metrics('eval' , snake_case__ )
# Write model card and (optionally) push to hub
A = {
'finetuned_from': model_args.model_name_or_path,
'tasks': 'audio-classification',
'dataset': data_args.dataset_name,
'tags': ['audio-classification'],
}
if training_args.push_to_hub:
trainer.push_to_hub(**snake_case__ )
else:
trainer.create_model_card(**snake_case__ )
if __name__ == "__main__":
main() | 710 |
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    # gamma(num) = (num - 1) * gamma(num - 1), with gamma(0.5) = sqrt(pi)
    # as the base case for half-integers.
    if num <= 0:
        raise ValueError('math domain error')
    if num > 171.5:
        raise OverflowError('math range error')
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError('num must be an integer or a half-integer')
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)
def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
num = 1.0
while num:
    num = float(input('''Gamma of: '''))
    print(F"""gamma({num}) = {gamma(num)}""")
    print('''\nEnter 0 to exit...''') | 22 | 0 |
"""simple docstring"""
import argparse
import json
import os
import fairseq
import torch
from fairseq.data import Dictionary
from transformers import (
HubertConfig,
HubertForCTC,
HubertModel,
WavaVecaCTCTokenizer,
WavaVecaFeatureExtractor,
WavaVecaProcessor,
logging,
)
logging.set_verbosity_info()
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''w2v_encoder.proj''': '''lm_head''',
'''mask_emb''': '''masked_spec_embed''',
}
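# MAPPING translates fairseq parameter-name fragments to their Hugging Face counterparts; the '*' wildcard stands for a layer index that is filled in during conversion.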
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Union[str, Any] , snake_case__ : Optional[int] , snake_case__ : List[str] , snake_case__ : List[Any] ):
for attribute in key.split('.' ):
A = getattr(__snake_case , __snake_case )
if weight_type is not None:
A = getattr(__snake_case , __snake_case ).shape
else:
A = hf_pointer.shape
assert hf_shape == value.shape, (
F'Shape of hf {key + "." + weight_type if weight_type is not None else ""} is {hf_shape}, but should be'
F' {value.shape} for {full_name}'
)
if weight_type == "weight":
A = value
elif weight_type == "weight_g":
A = value
elif weight_type == "weight_v":
A = value
elif weight_type == "bias":
A = value
else:
A = value
logger.info(F'{key + "." + weight_type if weight_type is not None else ""} was initialized from {full_name}.' )
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Optional[Any] , snake_case__ : Optional[int] ):
A = []
A = fairseq_model.state_dict()
A = hf_model.hubert.feature_extractor if is_finetuned else hf_model.feature_extractor
for name, value in fairseq_dict.items():
A = False
if "conv_layers" in name:
load_conv_layer(
__snake_case , __snake_case , __snake_case , __snake_case , hf_model.config.feat_extract_norm == 'group' , )
A = True
else:
for key, mapped_key in MAPPING.items():
A = 'hubert.' + mapped_key if (is_finetuned and mapped_key != 'lm_head') else mapped_key
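# fine-tuned checkpoints nest the encoder weights under the 'hubert.' prefix; only the LM head lives at the top level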
if key in name or (key.split('w2v_model.' )[-1] == name.split('.' )[0] and not is_finetuned):
A = True
if "*" in mapped_key:
A = name.split(__snake_case )[0].split('.' )[-2]
A = mapped_key.replace('*' , __snake_case )
if "weight_g" in name:
A = 'weight_g'
elif "weight_v" in name:
A = 'weight_v'
elif "weight" in name:
A = 'weight'
elif "bias" in name:
A = 'bias'
else:
A = None
set_recursively(__snake_case , __snake_case , __snake_case , __snake_case , __snake_case )
continue
if not is_used:
unused_weights.append(__snake_case )
logger.warning(F'Unused weights: {unused_weights}' )
def _snake_case ( snake_case__ : int , snake_case__ : Any , snake_case__ : List[str] , snake_case__ : str , snake_case__ : Dict ):
A = full_name.split('conv_layers.' )[-1]
A = name.split('.' )
A = int(items[0] )
A = int(items[1] )
if type_id == 0:
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.bias.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.'
)
A = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].conv.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.'
)
A = value
logger.info(F'Feat extract conv layer {layer_id} was initialized from {full_name}.' )
elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
if "bias" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape, (
F'{full_name} has size {value.shape}, but {feature_extractor[layer_id].layer_norm.bias.data.shape} was'
" found."
)
A = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
elif "weight" in name:
assert value.shape == feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape, (
F'{full_name} has size {value.shape}, but'
F' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.'
)
A = value
logger.info(F'Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.' )
else:
unused_weights.append(__snake_case )
@torch.no_grad()
def _snake_case ( snake_case__ : Any , snake_case__ : List[str] , snake_case__ : Tuple=None , snake_case__ : int=None , snake_case__ : str=True ):
if config_path is not None:
A = HubertConfig.from_pretrained(__snake_case )
else:
A = HubertConfig()
if is_finetuned:
if dict_path:
A = Dictionary.load(__snake_case )
# important change bos & pad token id since CTC symbol is <pad> and
# not <s> as in fairseq
A = target_dict.pad_index
A = target_dict.bos_index
A = target_dict.eos_index
A = len(target_dict.symbols )
A = os.path.join(__snake_case , 'vocab.json' )
if not os.path.isdir(__snake_case ):
logger.error('--pytorch_dump_folder_path ({}) should be a directory'.format(__snake_case ) )
return
os.makedirs(__snake_case , exist_ok=__snake_case )
with open(__snake_case , 'w' , encoding='utf-8' ) as vocab_handle:
json.dump(target_dict.indices , __snake_case )
A = WavaVecaCTCTokenizer(
__snake_case , unk_token=target_dict.unk_word , pad_token=target_dict.pad_word , bos_token=target_dict.bos_word , eos_token=target_dict.eos_word , word_delimiter_token='|' , do_lower_case=__snake_case , )
A = True if config.feat_extract_norm == 'layer' else False
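# checkpoints with layer-norm feature extractors were trained with attention masks; group-norm ones were not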
A = WavaVecaFeatureExtractor(
feature_size=1 , sampling_rate=1_6000 , padding_value=0 , do_normalize=__snake_case , return_attention_mask=__snake_case , )
A = WavaVecaProcessor(feature_extractor=__snake_case , tokenizer=__snake_case )
processor.save_pretrained(__snake_case )
A = HubertForCTC(__snake_case )
else:
A = HubertModel(__snake_case )
if is_finetuned:
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task(
[checkpoint_path] , arg_overrides={'data': '/'.join(dict_path.split('/' )[:-1] )} )
else:
A , A , A = fairseq.checkpoint_utils.load_model_ensemble_and_task([checkpoint_path] )
A = model[0].eval()
recursively_load_weights(__snake_case , __snake_case , __snake_case )
hf_wavavec.save_pretrained(__snake_case )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
_lowercase = parser.parse_args()
convert_hubert_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
) | 711 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_ad_blocks import UNetMidBlockaD, get_down_block, get_up_block
@dataclass
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[str] ,A_ : Dict=3 ,A_ : int=3 ,A_ : str=("DownEncoderBlock2D",) ,A_ : Dict=(64,) ,A_ : str=2 ,A_ : Union[str, Any]=32 ,A_ : Optional[int]="silu" ,A_ : str=True ,) -> Union[str, Any]:
super().__init__()
A = layers_per_block
A = torch.nn.Convad(
A_ ,block_out_channels[0] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
# down
A = block_out_channels[0]
for i, down_block_type in enumerate(A_ ):
A = output_channel
A = block_out_channels[i]
A = i == len(A_ ) - 1
A = get_down_block(
A_ ,num_layers=self.layers_per_block ,in_channels=A_ ,out_channels=A_ ,add_downsample=not is_final_block ,resnet_eps=1e-6 ,downsample_padding=0 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,)
self.down_blocks.append(A_ )
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# out
A = nn.GroupNorm(num_channels=block_out_channels[-1] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = 2 * out_channels if double_z else out_channels
A = nn.Convad(block_out_channels[-1] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = x
A = self.conv_in(A_ )
if self.training and self.gradient_checkpointing:
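# gradient checkpointing: re-run each block in the backward pass instead of storing activations, trading compute for memory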
def create_custom_forward(A_ : Dict ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
# down
if is_torch_version('>=' ,'1.11.0' ):
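# the use_reentrant flag to torch.utils.checkpoint only exists from torch 1.11 onwards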
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,use_reentrant=A_ )
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,use_reentrant=A_ )
else:
for down_block in self.down_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ )
# middle
A = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block ) ,A_ )
else:
# down
for down_block in self.down_blocks:
A = down_block(A_ )
# middle
A = self.mid_block(A_ )
# post-process
A = self.conv_norm_out(A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any]=3 ,A_ : Optional[int]=3 ,A_ : str=("UpDecoderBlock2D",) ,A_ : Any=(64,) ,A_ : Optional[int]=2 ,A_ : Optional[int]=32 ,A_ : Tuple="silu" ,A_ : Optional[int]="group" ,) -> Any:
super().__init__()
A = layers_per_block
A = nn.Convad(
A_ ,block_out_channels[-1] ,kernel_size=3 ,stride=1 ,padding=1 ,)
A = None
A = nn.ModuleList([] )
A = in_channels if norm_type == 'spatial' else None
# mid
A = UNetMidBlockaD(
in_channels=block_out_channels[-1] ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,output_scale_factor=1 ,resnet_time_scale_shift='default' if norm_type == 'group' else norm_type ,attention_head_dim=block_out_channels[-1] ,resnet_groups=A_ ,temb_channels=A_ ,)
# up
A = list(reversed(A_ ) )
A = reversed_block_out_channels[0]
for i, up_block_type in enumerate(A_ ):
A = output_channel
A = reversed_block_out_channels[i]
A = i == len(A_ ) - 1
A = get_up_block(
A_ ,num_layers=self.layers_per_block + 1 ,in_channels=A_ ,out_channels=A_ ,prev_output_channel=A_ ,add_upsample=not is_final_block ,resnet_eps=1e-6 ,resnet_act_fn=A_ ,resnet_groups=A_ ,attention_head_dim=A_ ,temb_channels=A_ ,resnet_time_scale_shift=A_ ,)
self.up_blocks.append(A_ )
A = output_channel
# out
if norm_type == "spatial":
A = SpatialNorm(block_out_channels[0] ,A_ )
else:
A = nn.GroupNorm(num_channels=block_out_channels[0] ,num_groups=A_ ,eps=1e-6 )
A = nn.SiLU()
A = nn.Convad(block_out_channels[0] ,A_ ,3 ,padding=1 )
A = False
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : str ,A_ : Union[str, Any]=None ) -> Any:
A = z
A = self.conv_in(A_ )
A = next(iter(self.up_blocks.parameters() ) ).dtype
if self.training and self.gradient_checkpointing:
def create_custom_forward(A_ : List[Any] ):
def custom_forward(*A_ : Tuple ):
return module(*A_ )
return custom_forward
if is_torch_version('>=' ,'1.11.0' ):
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ ,use_reentrant=A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(A_ ) ,A_ ,A_ ,use_reentrant=A_ )
else:
# middle
A = torch.utils.checkpoint.checkpoint(
create_custom_forward(self.mid_block ) ,A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = torch.utils.checkpoint.checkpoint(create_custom_forward(A_ ) ,A_ ,A_ )
else:
# middle
A = self.mid_block(A_ ,A_ )
A = sample.to(A_ )
# up
for up_block in self.up_blocks:
A = up_block(A_ ,A_ )
# post-process
if latent_embeds is None:
A = self.conv_norm_out(A_ )
else:
A = self.conv_norm_out(A_ ,A_ )
A = self.conv_act(A_ )
A = self.conv_out(A_ )
return sample
class lowerCAmelCase_ ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,A_ : Optional[int] ,A_ : Any ,A_ : str ,A_ : Dict=None ,A_ : List[Any]="random" ,A_ : Optional[int]=False ,A_ : str=True ) -> List[str]:
super().__init__()
A = n_e
A = vq_embed_dim
A = beta
A = legacy
A = nn.Embedding(self.n_e ,self.vq_embed_dim )
self.embedding.weight.data.uniform_(-1.0 / self.n_e ,1.0 / self.n_e )
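# codebook vectors start uniformly distributed in [-1/n_e, 1/n_e]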
A = remap
if self.remap is not None:
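# optionally restrict the codebook to a subset of 'used' indices loaded from disk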
self.register_buffer('used' ,torch.tensor(np.load(self.remap ) ) )
A = self.used.shape[0]
A = unknown_index # "random" or "extra" or integer
if self.unknown_index == "extra":
A = self.re_embed
A = self.re_embed + 1
print(
F'Remapping {self.n_e} indices to {self.re_embed} indices. '
F'Using {self.unknown_index} for unknown indices.' )
else:
A = n_e
A = sane_index_shape
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ) -> Any:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
A = (inds[:, :, None] == used[None, None, ...]).long()
A = match.argmax(-1 )
A = match.sum(2 ) < 1
if self.unknown_index == "random":
A = torch.randint(0 ,self.re_embed ,size=new[unknown].shape ).to(device=new.device )
else:
A = self.unknown_index
return new.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ) -> List[Any]:
A = inds.shape
assert len(A_ ) > 1
A = inds.reshape(ishape[0] ,-1 )
A = self.used.to(A_ )
if self.re_embed > self.used.shape[0]: # extra token
A = 0 # simply set to zero
A = torch.gather(used[None, :][inds.shape[0] * [0], :] ,1 ,A_ )
return back.reshape(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : List[Any] ) -> str:
# reshape z -> (batch, height, width, channel) and flatten
A = z.permute(0 ,2 ,3 ,1 ).contiguous()
A = z.view(-1 ,self.vq_embed_dim )
# distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
A = torch.argmin(torch.cdist(A_ ,self.embedding.weight ) ,dim=1 )
A = self.embedding(A_ ).view(z.shape )
A = None
A = None
# compute loss for embedding
if not self.legacy:
A = self.beta * torch.mean((z_q.detach() - z) ** 2 ) + torch.mean((z_q - z.detach()) ** 2 )
else:
A = torch.mean((z_q.detach() - z) ** 2 ) + self.beta * torch.mean((z_q - z.detach()) ** 2 )
# preserve gradients
A = z + (z_q - z).detach()
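# straight-through estimator: the quantization step behaves as the identity in the backward pass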
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
if self.remap is not None:
A = min_encoding_indices.reshape(z.shape[0] ,-1 ) # add batch axis
A = self.remap_to_used(A_ )
A = min_encoding_indices.reshape(-1 ,1 ) # flatten
if self.sane_index_shape:
A = min_encoding_indices.reshape(z_q.shape[0] ,z_q.shape[2] ,z_q.shape[3] )
return z_q, loss, (perplexity, min_encodings, min_encoding_indices)
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : str ) -> Union[str, Any]:
# shape specifying (batch, height, width, channel)
if self.remap is not None:
A = indices.reshape(shape[0] ,-1 ) # add batch axis
A = self.unmap_to_all(A_ )
A = indices.reshape(-1 ) # flatten again
# get quantized latent vectors
A = self.embedding(A_ )
if shape is not None:
A = z_q.view(A_ )
# reshape back to match original input shape
A = z_q.permute(0 ,3 ,1 ,2 ).contiguous()
return z_q
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
def __init__( self : str ,A_ : Tuple ,A_ : Dict=False ) -> List[str]:
A = parameters
A , A = torch.chunk(A_ ,2 ,dim=1 )
A = torch.clamp(self.logvar ,-30.0 ,20.0 )
A = deterministic
A = torch.exp(0.5 * self.logvar )
A = torch.exp(self.logvar )
if self.deterministic:
A = A = torch.zeros_like(
self.mean ,device=self.parameters.device ,dtype=self.parameters.dtype )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[torch.Generator] = None ) -> torch.FloatTensor:
# make sure sample is on the same device as the parameters and has same dtype
A = randn_tensor(
self.mean.shape ,generator=A_ ,device=self.parameters.device ,dtype=self.parameters.dtype )
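# reparameterization trick: x = mean + std * eps with eps ~ N(0, I)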
A = self.mean + self.std * sample
return x
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Tuple=None ) -> int:
if self.deterministic:
return torch.Tensor([0.0] )
else:
if other is None:
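# closed-form KL(N(mean, var) || N(0, I)) = 0.5 * sum(mean^2 + var - 1 - log var)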
return 0.5 * torch.sum(torch.pow(self.mean ,2 ) + self.var - 1.0 - self.logvar ,dim=[1, 2, 3] )
else:
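# closed-form KL divergence between two diagonal Gaussians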
return 0.5 * torch.sum(
torch.pow(self.mean - other.mean ,2 ) / other.var
+ self.var / other.var
- 1.0
- self.logvar
+ other.logvar ,dim=[1, 2, 3] ,)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : List[str] ,A_ : Union[str, Any]=[1, 2, 3] ) -> List[str]:
if self.deterministic:
return torch.Tensor([0.0] )
A = np.log(2.0 * np.pi )
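# Gaussian negative log-likelihood of `sample` under N(mean, var), summed over the given dims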
return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean ,2 ) / self.var ,dim=A_ )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
return self.mean | 22 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( lowercase_ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: List[str] = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : str=0 ) -> List[Any]:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(lowerCamelCase_ ) )
A = np.random.RandomState(lowerCamelCase_ )
A = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""image""": image,
"""generator""": generator,
"""num_inference_steps""": 3,
"""strength""": 0.75,
"""guidance_scale""": 7.5,
"""output_type""": """numpy""",
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A = self.get_dummy_inputs()
A = pipe(**lowerCamelCase_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=lowerCamelCase_ )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A = self.get_dummy_inputs()
A = pipe(**lowerCamelCase_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Any:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**lowerCamelCase_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[str]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A = self.get_dummy_inputs()
A = pipe(**lowerCamelCase_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A = self.get_dummy_inputs()
A = pipe(**lowerCamelCase_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A = self.get_dummy_inputs()
A = pipe(**lowerCamelCase_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> str:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A = """A fantasy landscape, trending on artstation"""
A = np.random.RandomState(0 )
A = pipe(
prompt=lowerCamelCase_ ,image=lowerCamelCase_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=lowerCamelCase_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=lowerCamelCase_ ,safety_checker=lowerCamelCase_ ,feature_extractor=lowerCamelCase_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=lowerCamelCase_ )
A = """A fantasy landscape, trending on artstation"""
A = np.random.RandomState(0 )
A = pipe(
prompt=lowerCamelCase_ ,image=lowerCamelCase_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=lowerCamelCase_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 712 |
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, xa: int) -> list:
    # Neville's iterated interpolation: q[j][i] holds the interpolating
    # polynomial through points j - i + 1 .. j, evaluated at xa.
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (xa - x_points[j - i + 1]) * q[j][i - 1]
                - (xa - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
if __name__ == "__main__":
import doctest
doctest.testmod() | 22 | 0 |
from itertools import product
def total_frequency_distribution(sides_number: int, dice_number: int) -> list:
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    face_numbers = range(min_face_number, max_face_number + 1)
    for dice_numbers in product(face_numbers, repeat=dice_number):
        total = sum(dice_numbers)
        totals_frequencies[total] += 1
    return totals_frequencies
def solution() -> float:
    # Project Euler 205: probability that the total of Peter's nine
    # four-sided dice beats the total of Colin's six six-sided dice.
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4, dice_number=9)
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6, dice_number=6)
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total, max_peter_total + 1):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total])
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability, ndigits=7)
    return rounded_peter_win_probability
if __name__ == "__main__":
print(F"""{solution() = }""") | 713 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import MaskaFormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MaskaFormerForUniversalSegmentation, MaskaFormerModel
if is_vision_available():
from transformers import MaskaFormerImageProcessor
if is_vision_available():
from PIL import Image
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Optional[Any] ,A_ : Optional[Any] ,A_ : Optional[int]=2 ,A_ : Any=True ,A_ : List[str]=False ,A_ : Tuple=10 ,A_ : List[Any]=3 ,A_ : Any=32 * 8 ,A_ : Dict=32 * 8 ,A_ : List[Any]=4 ,A_ : Tuple=64 ,) -> List[str]:
A = parent
A = batch_size
A = is_training
A = use_auxiliary_loss
A = num_queries
A = num_channels
A = min_size
A = max_size
A = num_labels
A = hidden_dim
A = hidden_dim
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> List[str]:
A = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
A_ )
A = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=A_ )
A = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=A_ ) > 0.5
).float()
A = (torch.rand((self.batch_size, self.num_labels) ,device=A_ ) > 0.5).long()
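# random pixel values, binary masks and integer class labels suffice for the shape and loss checks below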
A = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = MaskaFormerConfig(
hidden_size=self.hidden_dim ,)
A = self.num_queries
A = self.num_labels
A = [1, 1, 1, 1]
A = self.num_channels
A = 64
A = 128
A = self.hidden_dim
A = self.hidden_dim
A = self.hidden_dim
return config
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
A , A , A , A , A = self.prepare_config_and_inputs()
A = {'pixel_values': pixel_values, 'pixel_mask': pixel_mask}
return config, inputs_dict
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : Optional[int] ) -> Union[str, Any]:
A = output.encoder_hidden_states
A = output.pixel_decoder_hidden_states
A = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(A_ ) ,config.decoder_layers )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : List[str] ,A_ : Union[str, Any]=False ) -> str:
with torch.no_grad():
A = MaskaFormerModel(config=A_ )
model.to(A_ )
model.eval()
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ ,output_hidden_states=A_ )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(A_ ,A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : Any ,A_ : Dict ,A_ : Any ,A_ : Dict ) -> Optional[Any]:
A = MaskaFormerForUniversalSegmentation(config=A_ )
model.to(A_ )
model.eval()
def comm_check_on_output(A_ : str ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A = model(pixel_values=A_ ,pixel_mask=A_ )
A = model(A_ )
comm_check_on_output(A_ )
A = model(
pixel_values=A_ ,pixel_mask=A_ ,mask_labels=A_ ,class_labels=A_ )
comm_check_on_output(A_ )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Union[str, Any] = (MaskaFormerModel, MaskaFormerForUniversalSegmentation) if is_torch_available() else ()
_lowerCamelCase: Optional[Any] = {'''feature-extraction''': MaskaFormerModel} if is_torch_available() else {}
_lowerCamelCase: int = False
_lowerCamelCase: Dict = False
_lowerCamelCase: List[str] = False
_lowerCamelCase: int = False
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = MaskaFormerModelTester(self )
A = ConfigTester(self ,config_class=A_ ,has_text_modality=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_maskaformer_instance_segmentation_head_model(*A_ )
@unittest.skip(reason='Mask2Former does not use inputs_embeds' )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
pass
@unittest.skip(reason='Mask2Former does not have a get_input_embeddings method' )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
pass
@unittest.skip(reason='Mask2Former is not a generative model' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Dict:
pass
@unittest.skip(reason='Mask2Former does not use token embeddings' )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Union[str, Any]:
pass
@require_torch_multi_gpu
@unittest.skip(
reason='Mask2Former has some layers using `add_module` which doesn\'t work well with `nn.DataParallel`' )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
pass
@unittest.skip('Will be fixed soon by reducing the size of the model used for common tests.' )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
A = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A = [*signature.parameters.keys()]
A = ['pixel_values']
self.assertListEqual(arg_names[:1] ,A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
A = MaskaFormerModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = (self.model_tester.min_size,) * 2
A = {
'pixel_values': torch.randn((2, 3, *size) ,device=A_ ),
'mask_labels': torch.randn((2, 10, *size) ,device=A_ ),
'class_labels': torch.zeros(2 ,10 ,device=A_ ).long(),
}
A = self.model_tester.get_config()
A = MaskaFormerForUniversalSegmentation(A_ ).to(A_ )
A = model(**A_ )
self.assertTrue(outputs.loss is not None )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[Any]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.create_and_check_maskaformer_model(A_ ,**A_ ,output_hidden_states=A_ )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> List[str]:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ ).to(A_ )
A = model(**A_ ,output_attentions=A_ )
self.assertTrue(outputs.attentions is not None )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
if not self.model_tester.is_training:
return
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = model_class(A_ )
model.to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ ).loss
loss.backward()
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.all_model_classes[1]
A , A , A , A , A = self.model_tester.prepare_config_and_inputs()
A = True
A = True
A = model_class(A_ ).to(A_ )
model.train()
A = model(A_ ,mask_labels=A_ ,class_labels=A_ )
A = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A = outputs.attentions[0]
attentions.retain_grad()
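# retain_grad() keeps gradients on these non-leaf tensors so they can be asserted after backward()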
outputs.loss.backward(retain_graph=A_ )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
_lowercase = 1e-4
def _snake_case ( ):
A = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
return image
@require_vision
@slow
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
return MaskaFormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = MaskaFormerModel.from_pretrained(self.model_checkpoints ).to(A_ )
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
A = torch.tensor(
[[-0.27_90, -1.07_17, -1.16_68], [-0.51_28, -0.31_28, -0.49_87], [-0.58_32, 0.19_71, -0.01_97]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[0.89_73, 1.18_47, 1.17_76], [1.19_34, 1.50_40, 1.51_28], [1.11_53, 1.44_86, 1.49_51]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,A_ ,atol=A_ ) )
A = torch.tensor(
[[2.11_52, 1.70_00, -0.86_03], [1.58_08, 1.80_04, -0.93_53], [1.60_43, 1.74_95, -0.59_99]] ).to(A_ )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Union[str, Any]:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = prepare_img()
A = image_processor(A_ ,return_tensors='pt' ).to(A_ )
A = inputs['pixel_values'].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(A_ ,(1, 3, 384, 384) )
with torch.no_grad():
A = model(**A_ )
# masks_queries_logits
A = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A = [
[-8.78_39, -9.00_56, -8.81_21],
[-7.41_04, -7.03_13, -6.54_01],
[-6.61_05, -6.34_27, -6.46_75],
]
A = torch.tensor(A_ ).to(A_ )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,A_ ,atol=A_ ) )
# class_queries_logits
A = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A = torch.tensor(
[
[1.83_24, -8.08_35, -4.19_22],
[0.84_50, -9.00_50, -3.60_53],
[0.30_45, -7.72_93, -3.02_75],
] ).to(A_ )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,A_ ,atol=A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
A = MaskaFormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(A_ ).eval()
A = self.default_image_processor
A = image_processor(
[np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.floataa ), np.zeros((384, 384) ).astype(np.floataa )] ,return_tensors='pt' ,)
A = inputs['pixel_values'].to(A_ )
A = [el.to(A_ ) for el in inputs['mask_labels']]
A = [el.to(A_ ) for el in inputs['class_labels']]
with torch.no_grad():
A = model(**A_ )
self.assertTrue(outputs.loss is not None ) | 22 | 0 |
import unittest
from transformers.testing_utils import CaptureStdout
from transformers.tools.python_interpreter import evaluate
def _snake_case ( snake_case__ : int ):
return x + 2
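# add_two stands in for a tool that the restricted interpreter is allowed to call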
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
A = "x = 3"
A = {}
A = evaluate(A_ ,{} ,state=A_ )
assert result == 3
self.assertDictEqual(A_ ,{'x': 3} )
A = "x = y"
A = {"y": 5}
A = evaluate(A_ ,{} ,state=A_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(A_ ,{'x': 5, 'y': 5} )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
A = "y = add_two(x)"
A = {"x": 3}
A = evaluate(A_ ,{'add_two': add_two} ,state=A_ )
assert result == 5
self.assertDictEqual(A_ ,{'x': 3, 'y': 5} )
# Won't work without the tool
with CaptureStdout() as out:
A = evaluate(A_ ,{} ,state=A_ )
assert result is None
assert "tried to execute add_two" in out.out
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
A = "x = 3"
A = {}
A = evaluate(A_ ,{} ,state=A_ )
assert result == 3
self.assertDictEqual(A_ ,{'x': 3} )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
A = "test_dict = {'x': x, 'y': add_two(x)}"
A = {"x": 3}
A = evaluate(A_ ,{'add_two': add_two} ,state=A_ )
self.assertDictEqual(A_ ,{'x': 3, 'y': 5} )
self.assertDictEqual(A_ ,{'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Tuple:
A = "x = 3\ny = 5"
A = {}
A = evaluate(A_ ,{} ,state=A_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(A_ ,{'x': 3, 'y': 5} )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> List[Any]:
A = "text = f'This is x: {x}.'"
A = {"x": 3}
A = evaluate(A_ ,{} ,state=A_ )
# evaluate returns the value of the last assignment.
assert result == "This is x: 3."
self.assertDictEqual(A_ ,{'x': 3, 'text': 'This is x: 3.'} )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = "if x <= 3:\n y = 2\nelse:\n y = 5"
A = {"x": 3}
A = evaluate(A_ ,{} ,state=A_ )
# evaluate returns the value of the last assignment.
assert result == 2
self.assertDictEqual(A_ ,{'x': 3, 'y': 2} )
A = {"x": 8}
A = evaluate(A_ ,{} ,state=A_ )
# evaluate returns the value of the last assignment.
assert result == 5
self.assertDictEqual(A_ ,{'x': 8, 'y': 5} )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
A = "test_list = [x, add_two(x)]"
A = {"x": 3}
A = evaluate(A_ ,{'add_two': add_two} ,state=A_ )
self.assertListEqual(A_ ,[3, 5] )
self.assertDictEqual(A_ ,{'x': 3, 'test_list': [3, 5]} )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
A = "y = x"
A = {"x": 3}
A = evaluate(A_ ,{} ,state=A_ )
assert result == 3
self.assertDictEqual(A_ ,{'x': 3, 'y': 3} )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = "test_list = [x, add_two(x)]\ntest_list[1]"
A = {"x": 3}
A = evaluate(A_ ,{'add_two': add_two} ,state=A_ )
assert result == 5
self.assertDictEqual(A_ ,{'x': 3, 'test_list': [3, 5]} )
A = "test_dict = {'x': x, 'y': add_two(x)}\ntest_dict['y']"
A = {"x": 3}
A = evaluate(A_ ,{'add_two': add_two} ,state=A_ )
assert result == 5
self.assertDictEqual(A_ ,{'x': 3, 'test_dict': {'x': 3, 'y': 5}} )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[str]:
A = "x = 0\nfor i in range(3):\n x = i"
A = {}
A = evaluate(A_ ,{'range': range} ,state=A_ )
assert result == 2
self.assertDictEqual(A_ ,{'x': 2, 'i': 2} ) | 714 |
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import EsmConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import numpy
import tensorflow as tf
from transformers.models.esm.modeling_tf_esm import (
TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
TFEsmModel,
)
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ,A_ : Optional[Any] ,) -> Optional[int]:
A = parent
A = 13
A = 7
A = True
A = True
A = True
A = 99
A = 32
A = 2
A = 4
A = 37
A = 'gelu'
A = 0.1
A = 0.1
A = 512
A = 16
A = 2
A = 0.02
A = 3
A = 4
A = None
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Union[str, Any]:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = None
if self.use_input_mask:
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,self.num_choices )
A = EsmConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,pad_token_id=1 ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_range=self.initializer_range ,)
return config, input_ids, input_mask, sequence_labels, token_labels, choice_labels
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A , A , A , A , A , A = self.prepare_config_and_inputs()
A = True
A = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
A = ids_tensor([self.batch_size, self.seq_length] ,vocab_size=2 )
return (
config,
input_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : List[str] ,A_ : Optional[int] ,A_ : List[Any] ,A_ : Any ,A_ : Any ) -> Dict:
A = TFEsmModel(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Any ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : List[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,) -> Optional[int]:
A = True
A = TFEsmModel(config=A_ )
A = {
'input_ids': input_ids,
'attention_mask': input_mask,
'encoder_hidden_states': encoder_hidden_states,
'encoder_attention_mask': encoder_attention_mask,
}
A = model(A_ )
A = [input_ids, input_mask]
A = model(A_ ,encoder_hidden_states=A_ )
# Also check the case where encoder outputs are not passed
A = model(A_ ,attention_mask=A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : List[Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Optional[int] ,A_ : Optional[Any] ,A_ : List[Any] ) -> Dict:
A = TFEsmForMaskedLM(config=A_ )
A = model([input_ids, input_mask] )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[Any] ,A_ : int ,A_ : Tuple ,A_ : Optional[int] ) -> Union[str, Any]:
A = self.num_labels
A = TFEsmForTokenClassification(config=A_ )
A = {'input_ids': input_ids, 'attention_mask': input_mask}
A = model(A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[Any]:
A = self.prepare_config_and_inputs()
A , A , A , A , A , A = config_and_inputs
A = {'input_ids': input_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class lowerCAmelCase_ ( _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
TFEsmModel,
TFEsmForMaskedLM,
TFEsmForSequenceClassification,
TFEsmForTokenClassification,
)
if is_tf_available()
else ()
)
_lowerCamelCase: List[str] = (
{
'''feature-extraction''': TFEsmModel,
'''fill-mask''': TFEsmForMaskedLM,
'''text-classification''': TFEsmForSequenceClassification,
'''token-classification''': TFEsmForTokenClassification,
'''zero-shot''': TFEsmForSequenceClassification,
}
if is_tf_available()
else {}
)
_lowerCamelCase: Union[str, Any] = False
_lowerCamelCase: List[Any] = False
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = TFEsmModelTester(self )
A = ConfigTester(self ,config_class=A_ ,hidden_size=37 )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[int]:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs_for_decoder()
self.model_tester.create_and_check_model_as_decoder(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*A_ )
@slow
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
for model_name in TF_ESM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = TFEsmModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
pass
@unittest.skip('Protein models do not support embedding resizing.' )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Any:
pass
def _SCREAMING_SNAKE_CASE ( self : str ) -> Dict:
A , A = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A = model_class(A_ )
assert isinstance(model.get_input_embeddings() ,tf.keras.layers.Layer )
if model_class is TFEsmForMaskedLM:
# Output embedding test differs from the main test because they're a matrix, not a layer
A = model.get_bias()
assert isinstance(A_ ,A_ )
for k, v in name.items():
assert isinstance(A_ ,tf.Variable )
else:
A = model.get_output_embeddings()
assert x is None
A = model.get_bias()
assert name is None
@require_tf
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = TFEsmForMaskedLM.from_pretrained('facebook/esm2_t6_8M_UR50D' )
A = tf.constant([[0, 1, 2, 3, 4, 5]] )
A = model(A_ )[0]
A = [1, 6, 33]
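# expected logits shape: (batch=1, seq_len=6, vocab_size=33 for the ESM-2 vocabulary)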
self.assertEqual(list(output.numpy().shape ) ,A_ )
# compare the actual values for a slice.
A = tf.constant(
[
[
[8.92_15_18, -10.58_98_14, -6.4_67_13_07],
[-6.3_96_71_56, -13.91_13_77, -1.1_21_19_15],
[-7.78_12_47, -13.95_15_57, -3.74_05_92],
]
] )
self.assertTrue(numpy.allclose(output[:, :3, :3].numpy() ,expected_slice.numpy() ,atol=1e-2 ) )
    @slow
    def test_inference_no_head(self):
        model = TFEsmModel.from_pretrained('facebook/esm2_t6_8M_UR50D')
        input_ids = tf.constant([[0, 6, 4, 13, 5, 4, 16, 12, 11, 7, 2]])
        output = model(input_ids)[0]
        # compare the actual values for a slice.
        expected_slice = tf.constant(
            [
                [
                    [0.14443092, 0.54125327, 0.3247739],
                    [0.30340484, 0.00526676, 0.31077722],
                    [0.32278043, -0.24987096, 0.3414628],
                ]
            ]
        )
        self.assertTrue(numpy.allclose(output[:, :3, :3].numpy(), expected_slice.numpy(), atol=1e-4))
"""simple docstring"""
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class TFFunnelModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        block_sizes=[1, 1, 2],
        num_decoder_layers=1,
        d_model=32,
        n_head=4,
        d_head=8,
        d_inner=37,
        hidden_act='gelu_new',
        hidden_dropout=0.1,
        attention_dropout=0.1,
        activation_dropout=0.0,
        max_position_embeddings=512,
        type_vocab_size=3,
        initializer_std=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
        base=False,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.block_sizes = block_sizes
        self.num_decoder_layers = num_decoder_layers
        self.d_model = d_model
        self.n_head = n_head
        self.d_head = d_head
        self.d_inner = d_inner
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = 2
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
        self.initializer_std = initializer_std
        # Used in the tests to check the size of the first attention layer
        self.num_attention_heads = n_head
        # Used in the tests to check the size of the first hidden state
        self.hidden_size = self.d_model
        # Used in the tests to check the number of output hidden states/attentions
        self.num_hidden_layers = sum(self.block_sizes) + (0 if base else self.num_decoder_layers)
        # FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
        # the last hidden state of the first block (which is the first hidden state of the decoder).
        if not base:
            self.expected_num_hidden_layers = self.num_hidden_layers + 2
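    # Worked example, not from the original test, using the defaults above for the full
    # (non-base) model: sum([1, 1, 2]) = 4 encoder layers + 1 decoder layer = 5, and
    # expected_num_hidden_layers = 5 + 2 = 7 once the two extra hidden states noted in
    # the comment are counted.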
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = FunnelConfig(
            vocab_size=self.vocab_size,
            block_sizes=self.block_sizes,
            num_decoder_layers=self.num_decoder_layers,
            d_model=self.d_model,
            n_head=self.n_head,
            d_head=self.d_head,
            d_inner=self.d_inner,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            activation_dropout=self.activation_dropout,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_std=self.initializer_std,
        )

        return (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        )
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.truncate_seq = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

        config.separate_cls = False
        model = TFFunnelModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.d_model))

    def create_and_check_base_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelBaseModel(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

        config.truncate_seq = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 3, self.d_model))

        config.separate_cls = False
        model = TFFunnelBaseModel(config=config)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, 2, self.d_model))

    def create_and_check_for_pretraining(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForPreTraining(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForMaskedLM(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForSequenceClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFFunnelForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFFunnelForTokenClassification(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFFunnelForQuestionAnswering(config=config)
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFFunnelModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFFunnelModel,
            TFFunnelForMaskedLM,
            TFFunnelForPreTraining,
            TFFunnelForQuestionAnswering,
            TFFunnelForTokenClassification,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            'feature-extraction': (TFFunnelBaseModel, TFFunnelModel),
            'fill-mask': TFFunnelForMaskedLM,
            'question-answering': TFFunnelForQuestionAnswering,
            'text-classification': TFFunnelForSequenceClassification,
            'token-classification': TFFunnelForTokenClassification,
            'zero-shot': TFFunnelForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_pretraining(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)
@require_tf
class TFFunnelBaseModelTest(TFModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
    )
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFFunnelModelTester(self, base=True)
        self.config_tester = ConfigTester(self, config_class=FunnelConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_base_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_base_model(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)
"""simple docstring"""
# this script reports modified .py files under the desired list of top-level sub-dirs passed as a list of arguments, e.g.:
# python ./utils/get_modified_files.py utils src tests examples
#
# it uses git to find the forking point and which files were modified - i.e. files not under git won't be considered
# since the output of this script is fed into Makefile commands it doesn't print a newline after the results
import re
import subprocess
import sys
fork_point_sha = subprocess.check_output('git merge-base main HEAD'.split()).decode('utf-8')

modified_files = subprocess.check_output(f'git diff --name-only {fork_point_sha}'.split()).decode('utf-8').split()

joined_dirs = '|'.join(sys.argv[1:])
regex = re.compile(rf'^({joined_dirs}).*?\.py$')

relevant_modified_files = [x for x in modified_files if regex.match(x)]
print(' '.join(relevant_modified_files), end='')
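# Illustrative run (not part of the script): `python utils/get_modified_files.py utils src`
# builds the regex r"^(utils|src).*?\.py$", so a diff touching utils/check_repo.py and
# README.md would print only: utils/check_repo.py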
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class AutoencoderKLTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = AutoencoderKL
    main_input_name = 'sample'
    base_precision = 1e-2

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)
        image = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        return {'sample': image}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            'block_out_channels': [32, 64],
            'in_channels': 3,
            'out_channels': 3,
            'down_block_types': ['DownEncoderBlock2D', 'DownEncoderBlock2D'],
            'up_block_types': ['UpDecoderBlock2D', 'UpDecoderBlock2D'],
            'latent_channels': 4,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict

    def test_forward_signature(self):
        pass

    def test_training(self):
        pass
    @unittest.skipIf(torch_device == 'mps', 'Gradient checkpointing skipped on MPS')
    def test_gradient_checkpointing(self):
        # enable deterministic behavior for gradient checkpointing
        init_dict, inputs_dict = self.prepare_init_args_and_inputs_for_common()
        model = self.model_class(**init_dict)
        model.to(torch_device)

        assert not model.is_gradient_checkpointing and model.training

        out = model(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model.zero_grad()

        labels = torch.randn_like(out)
        loss = (out - labels).mean()
        loss.backward()

        # re-instantiate the model now enabling gradient checkpointing
        model_2 = self.model_class(**init_dict)
        # clone model
        model_2.load_state_dict(model.state_dict())
        model_2.to(torch_device)
        model_2.enable_gradient_checkpointing()

        assert model_2.is_gradient_checkpointing and model_2.training

        out_2 = model_2(**inputs_dict).sample
        # run the backwards pass on the model. For backwards pass, for simplicity purpose,
        # we won't calculate the loss and rather backprop on out.sum()
        model_2.zero_grad()
        loss_2 = (out_2 - labels).mean()
        loss_2.backward()

        # compare the output and parameters gradients
        self.assertTrue((loss - loss_2).abs() < 1e-5)
        named_params = dict(model.named_parameters())
        named_params_2 = dict(model_2.named_parameters())
        for name, param in named_params.items():
            self.assertTrue(torch_all_close(param.grad.data, named_params_2[name].grad.data, atol=5e-5))
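    # Sketch of the pattern exercised above (assuming the public diffusers API): any
    # ModelMixin can toggle checkpointing via `model.enable_gradient_checkpointing()`,
    # after which the backward pass recomputes intermediate activations instead of
    # storing them, trading compute for memory -- which is why the losses and gradients
    # are expected to match the non-checkpointed baseline within tolerance.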
    def test_from_pretrained_hub(self):
        model, loading_info = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy', output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info['missing_keys']), 0)

        model.to(torch_device)
        image = model(**self.dummy_input)

        assert image is not None, 'Make sure output is not None'

    def test_output_pretrained(self):
        model = AutoencoderKL.from_pretrained('fusing/autoencoder-kl-dummy')
        model = model.to(torch_device)
        model.eval()

        if torch_device == 'mps':
            generator = torch.manual_seed(0)
        else:
            generator = torch.Generator(device=torch_device).manual_seed(0)

        image = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        image = image.to(torch_device)
        with torch.no_grad():
            output = model(image, sample_posterior=True, generator=generator).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()

        # Since the VAE Gaussian prior's generator is seeded on the appropriate device,
        # the expected output slices are not the same for CPU and GPU.
        if torch_device == 'mps':
            expected_output_slice = torch.tensor(
                [
                    -4.0078e-01,
                    -3.8323e-04,
                    -1.2681e-01,
                    -1.1462e-01,
                    2.0095e-01,
                    1.0893e-01,
                    -8.8247e-02,
                    -3.0361e-01,
                    -9.8644e-03,
                ]
            )
        elif torch_device == 'cpu':
            expected_output_slice = torch.tensor(
                [-0.1352, 0.0878, 0.0419, -0.0818, -0.1069, 0.0688, -0.1458, -0.4446, -0.0026]
            )
        else:
            expected_output_slice = torch.tensor(
                [-0.2421, 0.4642, 0.2507, -0.0438, 0.0682, 0.3160, -0.2018, -0.0727, 0.2485]
            )

        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))
@slow
class AutoencoderKLIntegrationTests(unittest.TestCase):
    def get_file_format(self, seed, shape):
        return f'gaussian_noise_s={seed}_shape={"_".join([str(s) for s in shape])}.npy'

    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def get_sd_image(self, seed=0, shape=(4, 3, 512, 512), fp16=False):
        dtype = torch.float16 if fp16 else torch.float32
        image = torch.from_numpy(load_hf_numpy(self.get_file_format(seed, shape))).to(torch_device).to(dtype)
        return image

    def get_sd_vae_model(self, model_id='CompVis/stable-diffusion-v1-4', fp16=False):
        revision = 'fp16' if fp16 else None
        torch_dtype = torch.float16 if fp16 else torch.float32
        model = AutoencoderKL.from_pretrained(
            model_id,
            subfolder='vae',
            torch_dtype=torch_dtype,
            revision=revision,
        )
        model.to(torch_device).eval()
        return model

    def get_generator(self, seed=0):
        if torch_device == 'mps':
            return torch.manual_seed(seed)
        return torch.Generator(device=torch_device).manual_seed(seed)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_03, 0.98_78, -0.04_95, -0.07_90, -0.27_09, 0.83_75, -0.20_60, -0.08_24], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_76, 0.11_68, 0.13_32, -0.48_40, -0.25_08, -0.07_91, -0.04_93, -0.40_89], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[33, [-0.05_13, 0.02_89, 1.37_99, 0.21_66, -0.25_73, -0.08_71, 0.51_03, -0.09_99]],
[47, [-0.41_28, -0.13_20, -0.37_04, 0.19_65, -0.41_16, -0.23_32, -0.33_40, 0.22_47]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        image = self.get_sd_image(seed, fp16=True)
        generator = self.get_generator(seed)

        with torch.no_grad():
            sample = model(image, generator=generator, sample_posterior=True).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.16_09, 0.98_66, -0.04_87, -0.07_77, -0.27_16, 0.83_68, -0.20_55, -0.08_14], [-0.23_95, 0.00_98, 0.01_02, -0.07_09, -0.28_40, -0.02_74, -0.07_18, -0.18_24]],
[47, [-0.23_77, 0.11_47, 0.13_33, -0.48_41, -0.25_06, -0.08_05, -0.04_91, -0.40_85], [0.03_50, 0.08_47, 0.04_67, 0.03_44, -0.08_42, -0.05_47, -0.06_33, -0.11_31]],
# fmt: on
] )
    def test_stable_diffusion_mode(self, seed, expected_slice, expected_slice_mps):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)

        with torch.no_grad():
            sample = model(image).sample

        assert sample.shape == image.shape

        output_slice = sample[-1, -2:, -2:, :2].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice_mps if torch_device == 'mps' else expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=3e-3)
@parameterized.expand(
[
# fmt: off
[13, [-0.20_51, -0.18_03, -0.23_11, -0.21_14, -0.32_92, -0.35_74, -0.29_53, -0.33_23]],
[37, [-0.26_32, -0.26_25, -0.21_99, -0.27_41, -0.45_39, -0.49_90, -0.37_20, -0.49_25]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=1e-3)
@parameterized.expand(
[
# fmt: off
[27, [-0.03_69, 0.02_07, -0.07_76, -0.06_82, -0.17_47, -0.19_30, -0.14_65, -0.20_39]],
[16, [-0.16_28, -0.21_34, -0.27_47, -0.26_42, -0.37_74, -0.44_04, -0.36_87, -0.42_77]],
# fmt: on
] )
@require_torch_gpu
    def test_stable_diffusion_decode_fp16(self, seed, expected_slice):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        output_slice = sample[-1, -2:, :2, -2:].flatten().float().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        assert torch_all_close(output_slice, expected_output_slice, atol=5e-3)
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0_fp16(self, seed):
        model = self.get_sd_vae_model(fp16=True)
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64), fp16=True)

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-1)
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() ,reason='xformers is not required when using PyTorch 2.0.' )
    def test_stable_diffusion_decode_xformers_vs_2_0(self, seed):
        model = self.get_sd_vae_model()
        encoding = self.get_sd_image(seed, shape=(3, 4, 64, 64))

        with torch.no_grad():
            sample = model.decode(encoding).sample

        model.enable_xformers_memory_efficient_attention()
        with torch.no_grad():
            sample_2 = model.decode(encoding).sample

        assert list(sample.shape) == [3, 3, 512, 512]

        assert torch_all_close(sample, sample_2, atol=1e-2)
@parameterized.expand(
[
# fmt: off
[33, [-0.30_01, 0.09_18, -2.69_84, -3.97_20, -3.20_99, -5.03_53, 1.73_38, -0.20_65, 3.42_67]],
[47, [-1.50_30, -4.38_71, -6.03_55, -9.11_57, -1.66_61, -2.78_53, 2.16_07, -5.08_23, 2.56_33]],
# fmt: on
] )
    def test_stable_diffusion_encode_sample(self, seed, expected_slice):
        model = self.get_sd_vae_model()
        image = self.get_sd_image(seed)
        generator = self.get_generator(seed)

        with torch.no_grad():
            dist = model.encode(image).latent_dist
            sample = dist.sample(generator=generator)

        assert list(sample.shape) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]

        output_slice = sample[0, -1, -3:, -3:].flatten().cpu()
        expected_output_slice = torch.tensor(expected_slice)

        tolerance = 3e-3 if torch_device != 'mps' else 1e-2
        assert torch_all_close(output_slice, expected_output_slice, atol=tolerance)
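    # Typical encode/decode round trip with this VAE (a sketch, not part of the original
    # test; shapes assume a 512x512 RGB batch):
    #   posterior = model.encode(image).latent_dist        # DiagonalGaussianDistribution
    #   latents = posterior.sample(generator=generator)    # (B, 4, 64, 64): spatial dims / 8
    #   reconstruction = model.decode(latents).sample      # back to (B, 3, 512, 512)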
"""simple docstring"""
import sys
from collections import defaultdict
class Heap:
    def __init__(self):
        self.node_position = []

    def get_position(self, vertex):
        return self.node_position[vertex]

    def set_position(self, vertex, pos):
        self.node_position[vertex] = pos

    def top_to_bottom(self, heap, start, size, positions):
        if start > size // 2 - 1:
            return
        else:
            if 2 * start + 2 >= size:
                smallest_child = 2 * start + 1
            else:
                if heap[2 * start + 1] < heap[2 * start + 2]:
                    smallest_child = 2 * start + 1
                else:
                    smallest_child = 2 * start + 2
            if heap[smallest_child] < heap[start]:
                temp, temp1 = heap[smallest_child], positions[smallest_child]
                heap[smallest_child], positions[smallest_child] = (
                    heap[start],
                    positions[start],
                )
                heap[start], positions[start] = temp, temp1

                temp = self.get_position(positions[smallest_child])
                self.set_position(
                    positions[smallest_child], self.get_position(positions[start])
                )
                self.set_position(positions[start], temp)

                self.top_to_bottom(heap, smallest_child, size, positions)

    def bottom_to_top(self, val, index, heap, position):
        temp = position[index]

        while index != 0:
            parent = int((index - 2) / 2) if index % 2 == 0 else int((index - 1) / 2)

            if val < heap[parent]:
                heap[index] = heap[parent]
                position[index] = position[parent]
                self.set_position(position[parent], index)
            else:
                heap[index] = val
                position[index] = temp
                self.set_position(temp, index)
                break
            index = parent
        else:
            heap[0] = val
            position[0] = temp
            self.set_position(temp, 0)

    def heapify(self, heap, positions):
        start = len(heap) // 2 - 1
        for i in range(start, -1, -1):
            self.top_to_bottom(heap, i, len(heap), positions)

    def delete_minimum(self, heap, positions):
        temp = positions[0]
        heap[0] = sys.maxsize
        self.top_to_bottom(heap, 0, len(heap), positions)
        return temp
def prims_algorithm(adjacency_list):
    heap = Heap()

    visited = [0] * len(adjacency_list)
    nbr_tv = [-1] * len(adjacency_list)  # Neighboring Tree Vertex of selected vertex
    # Minimum Distance of explored vertex with neighboring vertex of partial tree
    # formed in graph
    distance_tv = []  # Heap of Distance of vertices from their neighboring vertex
    positions = []

    for vertex in range(len(adjacency_list)):
        distance_tv.append(sys.maxsize)
        positions.append(vertex)
        heap.node_position.append(vertex)

    tree_edges = []
    visited[0] = 1
    distance_tv[0] = sys.maxsize
    for neighbor, distance in adjacency_list[0]:
        nbr_tv[neighbor] = 0
        distance_tv[neighbor] = distance
    heap.heapify(distance_tv, positions)

    for _ in range(1, len(adjacency_list)):
        vertex = heap.delete_minimum(distance_tv, positions)
        if visited[vertex] == 0:
            tree_edges.append((nbr_tv[vertex], vertex))
            visited[vertex] = 1
            for neighbor, distance in adjacency_list[vertex]:
                if (
                    visited[neighbor] == 0
                    and distance < distance_tv[heap.get_position(neighbor)]
                ):
                    distance_tv[heap.get_position(neighbor)] = distance
                    heap.bottom_to_top(
                        distance, heap.get_position(neighbor), distance_tv, positions
                    )
                    nbr_tv[neighbor] = vertex
    return tree_edges
if __name__ == "__main__":  # pragma: no cover
    # < --------- Prim's Algorithm --------- >
    edges_number = int(input('Enter number of edges: ').strip())
    adjacency_list = defaultdict(list)
    for _ in range(edges_number):
        edge = [int(x) for x in input().strip().split()]
        adjacency_list[edge[0]].append([edge[1], edge[2]])
        adjacency_list[edge[1]].append([edge[0], edge[2]])
    print(prims_algorithm(adjacency_list))
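# Worked example (illustrative, not part of the script): adjacency_list[u] holds
# [vertex, weight] pairs, so for the weighted edges (0-1:1), (0-2:4), (1-2:2), (2-3:3):
#   graph = defaultdict(list)
#   for u, v, w in [(0, 1, 1), (0, 2, 4), (1, 2, 2), (2, 3, 3)]:
#       graph[u].append([v, w])
#       graph[v].append([u, w])
#   prims_algorithm(graph)  # -> [(0, 1), (1, 2), (2, 3)], an MST edge list of weight 6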
"""simple docstring"""
def set_bit(number: int, position: int) -> int:
    return number | (1 << position)


def clear_bit(number: int, position: int) -> int:
    return number & ~(1 << position)


def flip_bit(number: int, position: int) -> int:
    return number ^ (1 << position)


def is_bit_set(number: int, position: int) -> bool:
    return ((number >> position) & 1) == 1


def get_bit(number: int, position: int) -> int:
    return int((number & (1 << position)) != 0)
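# Worked examples (illustrative; 9 == 0b1001):
#   set_bit(9, 1)    -> 11  (0b1011)
#   clear_bit(9, 3)  -> 1   (0b0001)
#   flip_bit(9, 0)   -> 8   (0b1000)
#   is_bit_set(9, 3) -> True
#   get_bit(9, 1)    -> 0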
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
import json
import os
import shutil
import warnings
from argparse import ArgumentParser, Namespace
from pathlib import Path
from typing import List
from ..utils import logging
from . import BaseTransformersCLICommand
try:
    from cookiecutter.main import cookiecutter

    _has_cookiecutter = True
except ImportError:
    _has_cookiecutter = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
def add_new_model_command_factory(args: Namespace):
    return AddNewModelCommand(args.testing, args.testing_file, path=args.path)


class AddNewModelCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        add_new_model_parser = parser.add_parser('add-new-model')
        add_new_model_parser.add_argument('--testing', action='store_true', help='If in testing mode.')
        add_new_model_parser.add_argument('--testing_file', type=str, help='Configuration file on which to run.')
        add_new_model_parser.add_argument(
            '--path', type=str, help='Path to cookiecutter. Should only be used for testing purposes.'
        )
        add_new_model_parser.set_defaults(func=add_new_model_command_factory)

    def __init__(self, testing: bool, testing_file: str, path=None, *args):
        self._testing = testing
        self._testing_file = testing_file
        self._path = path

    def run(self):
warnings.warn(
'The command `transformers-cli add-new-model` is deprecated and will be removed in v5 of Transformers. '
'It is not actively maintained anymore, so might give a result that won\'t pass all tests and quality '
'checks, you should use `transformers-cli add-new-model-like` instead.' )
if not _has_cookiecutter:
raise ImportError(
'Model creation dependencies are required to use the `add_new_model` command. Install them by running '
'the following at the root of your `transformers` clone:\n\n\t$ pip install -e .[modelcreation]\n' )
# Ensure that there is no other `cookiecutter-template-xxx` directory in the current working directory
        directories = [directory for directory in os.listdir() if 'cookiecutter-template-' == directory[:22]]
        if len(directories) > 0:
            raise ValueError(
                'Several directories starting with `cookiecutter-template-` in current working directory. '
                'Please clean your directory by removing all folders starting with `cookiecutter-template-` or '
                'change your working directory.'
            )

        path_to_transformer_root = (
            Path(__file__).parent.parent.parent.parent if self._path is None else Path(self._path).parent.parent
        )
        path_to_cookiecutter = path_to_transformer_root / 'templates' / 'adding_a_new_model'

        # Execute cookiecutter
        if not self._testing:
            cookiecutter(str(path_to_cookiecutter))
        else:
            with open(self._testing_file, 'r') as configuration_file:
                testing_configuration = json.load(configuration_file)
            cookiecutter(
                str(path_to_cookiecutter if self._path is None else self._path),
                no_input=True,
                extra_context=testing_configuration,
            )

        directory = [directory for directory in os.listdir() if 'cookiecutter-template-' in directory[:22]][0]

        # Retrieve configuration
        with open(directory + '/configuration.json', 'r') as configuration_file:
            configuration = json.load(configuration_file)

        lowercase_model_name = configuration['lowercase_modelname']
        generate_tensorflow_pytorch_and_flax = configuration['generate_tensorflow_pytorch_and_flax']
        os.remove(f'{directory}/configuration.json')

        output_pytorch = 'PyTorch' in generate_tensorflow_pytorch_and_flax
        output_tensorflow = 'TensorFlow' in generate_tensorflow_pytorch_and_flax
        output_flax = 'Flax' in generate_tensorflow_pytorch_and_flax

        model_dir = f'{path_to_transformer_root}/src/transformers/models/{lowercase_model_name}'
        os.makedirs(model_dir, exist_ok=True)
        os.makedirs(f'{path_to_transformer_root}/tests/models/{lowercase_model_name}', exist_ok=True)
# Tests require submodules as they have parent imports
with open(F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/__init__.py' ,'w' ):
pass
shutil.move(
F'{directory}/__init__.py' ,F'{model_dir}/__init__.py' ,)
shutil.move(
F'{directory}/configuration_{lowercase_model_name}.py' ,F'{model_dir}/configuration_{lowercase_model_name}.py' ,)
        def remove_copy_lines(path):
            with open(path, 'r') as f:
                lines = f.readlines()
            with open(path, 'w') as f:
                for line in lines:
                    if '# Copied from transformers.' not in line:
                        f.write(line)
if output_pytorch:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_{lowercase_model_name}.py' ,F'{model_dir}/modeling_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_{lowercase_model_name}.py' )
if output_tensorflow:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_tf_{lowercase_model_name}.py' ,F'{model_dir}/modeling_tf_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_tf_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_tf_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_tf_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_tf_{lowercase_model_name}.py' )
if output_flax:
if not self._testing:
remove_copy_lines(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/modeling_flax_{lowercase_model_name}.py' ,F'{model_dir}/modeling_flax_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/test_modeling_flax_{lowercase_model_name}.py' ,F'{path_to_transformer_root}/tests/models/{lowercase_model_name}/test_modeling_flax_{lowercase_model_name}.py' ,)
else:
os.remove(F'{directory}/modeling_flax_{lowercase_model_name}.py' )
os.remove(F'{directory}/test_modeling_flax_{lowercase_model_name}.py' )
shutil.move(
F'{directory}/{lowercase_model_name}.md' ,F'{path_to_transformer_root}/docs/source/en/model_doc/{lowercase_model_name}.md' ,)
shutil.move(
F'{directory}/tokenization_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}.py' ,)
shutil.move(
F'{directory}/tokenization_fast_{lowercase_model_name}.py' ,F'{model_dir}/tokenization_{lowercase_model_name}_fast.py' ,)
from os import fdopen, remove
from shutil import copymode, move
from tempfile import mkstemp
        def replace(original_file: str, line_to_copy_below: str, lines_to_copy: List[str]):
            # Create temp file
            fh, abs_path = mkstemp()
            line_found = False
            with fdopen(fh, 'w') as new_file:
                with open(original_file) as old_file:
                    for line in old_file:
                        new_file.write(line)
                        if line_to_copy_below in line:
                            line_found = True
                            for line_to_copy in lines_to_copy:
                                new_file.write(line_to_copy)

            if not line_found:
                raise ValueError(f'Line {line_to_copy_below} was not found in file.')

            # Copy the file permissions from the old file to the new file
            copymode(original_file, abs_path)
            # Remove original file
            remove(original_file)
            # Move new file
            move(abs_path, original_file)

        def skip_units(line):
            return (
                ('generating PyTorch' in line and not output_pytorch)
                or ('generating TensorFlow' in line and not output_tensorflow)
                or ('generating Flax' in line and not output_flax)
            )

        def replace_in_files(path_to_datafile):
            with open(path_to_datafile) as datafile:
                lines_to_copy = []
                skip_file = False
                skip_snippet = False
                for line in datafile:
                    if '# To replace in: ' in line and '##' not in line:
                        file_to_replace_in = line.split('"')[1]
                        skip_file = skip_units(line)
                    elif '# Below: ' in line and '##' not in line:
                        line_to_copy_below = line.split('"')[1]
                        skip_snippet = skip_units(line)
                    elif '# End.' in line and '##' not in line:
                        if not skip_file and not skip_snippet:
                            replace(file_to_replace_in, line_to_copy_below, lines_to_copy)
                        lines_to_copy = []
                    elif '# Replace with' in line and '##' not in line:
                        lines_to_copy = []
                    elif '##' not in line:
                        lines_to_copy.append(line)
            remove(path_to_datafile)

        replace_in_files(f'{directory}/to_replace_{lowercase_model_name}.py')
        os.rmdir(directory)
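        # The data files consumed by `replace_in_files` above follow a simple marker
        # protocol; an illustrative (hypothetical) snippet:
        #   # To replace in: "src/transformers/__init__.py"
        #   # Below: "# Models"
        #       "my_new_model",
        #   # End.
        # Every non-marker line between `# Below:` and `# End.` is copied into the
        # target file directly under the anchor line.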
"""simple docstring"""
import inspect
import os
import re
from transformers.configuration_utils import PretrainedConfig
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_config_docstrings.py
_lowercase = """src/transformers"""
# This is to make sure the transformers module imported is the one in the repo.
_lowercase = direct_transformers_import(PATH_TO_TRANSFORMERS)
_lowercase = transformers.models.auto.configuration_auto.CONFIG_MAPPING
_lowercase = {
# used to compute the property `self.chunk_length`
"""EncodecConfig""": ["""overlap"""],
# used as `self.bert_model = BertModel(config, ...)`
"""DPRConfig""": True,
# not used in modeling files, but it's an important information
"""FSMTConfig""": ["""langs"""],
# used internally in the configuration class file
"""GPTNeoConfig""": ["""attention_types"""],
# used internally in the configuration class file
"""EsmConfig""": ["""is_folding_model"""],
# used during training (despite we don't have training script for these models yet)
"""Mask2FormerConfig""": ["""ignore_value"""],
# `ignore_value` used during training (despite we don't have training script for these models yet)
# `norm` used in conversion script (despite not using in the modeling file)
"""OneFormerConfig""": ["""ignore_value""", """norm"""],
# used during preprocessing and collation, see `collating_graphormer.py`
"""GraphormerConfig""": ["""spatial_pos_max"""],
# used internally in the configuration class file
"""T5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
# `tokenizer_class` get default value `T5Tokenizer` intentionally
"""MT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
"""UMT5Config""": ["""feed_forward_proj""", """tokenizer_class"""],
# used internally in the configuration class file
"""LongT5Config""": ["""feed_forward_proj"""],
# used internally in the configuration class file
"""SwitchTransformersConfig""": ["""feed_forward_proj"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""BioGptConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""GLPNConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""SegformerConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""CvtConfig""": ["""layer_norm_eps"""],
# having default values other than `1e-5` - we can't fix them without breaking
"""PerceiverConfig""": ["""layer_norm_eps"""],
# used internally to calculate the feature size
"""InformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""TimeSeriesTransformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate the feature size
"""AutoformerConfig""": ["""num_static_real_features""", """num_time_features"""],
# used internally to calculate `mlp_dim`
"""SamVisionConfig""": ["""mlp_ratio"""],
# For (head) training, but so far not implemented
"""ClapAudioConfig""": ["""num_classes"""],
# Not used, but providing useful information to users
"""SpeechT5HifiGanConfig""": ["""sampling_rate"""],
}
# TODO (ydshieh): Check the failing cases, try to fix them or move some cases to the above block once we are sure
SPECIAL_CASES_TO_ALLOW.update(
{
'''CLIPSegConfig''': True,
'''DeformableDetrConfig''': True,
'''DetaConfig''': True,
'''DinatConfig''': True,
'''DonutSwinConfig''': True,
'''EfficientFormerConfig''': True,
'''FSMTConfig''': True,
'''JukeboxConfig''': True,
'''LayoutLMv2Config''': True,
'''MaskFormerSwinConfig''': True,
'''MT5Config''': True,
'''NatConfig''': True,
'''OneFormerConfig''': True,
'''PerceiverConfig''': True,
'''RagConfig''': True,
'''SpeechT5Config''': True,
'''SwinConfig''': True,
'''Swin2SRConfig''': True,
'''Swinv2Config''': True,
'''SwitchTransformersConfig''': True,
'''TableTransformerConfig''': True,
'''TapasConfig''': True,
'''TransfoXLConfig''': True,
'''UniSpeechConfig''': True,
'''UniSpeechSatConfig''': True,
'''WavLMConfig''': True,
'''WhisperConfig''': True,
# TODO: @Arthur (for `alignment_head` and `alignment_layer`)
'''JukeboxPriorConfig''': True,
# TODO: @Younes (for `is_decoder`)
'''Pix2StructTextConfig''': True,
}
)
def check_attribute_being_used(config_class, attributes, default_value, source_strings):
    attribute_used = False
    for attribute in attributes:
        for modeling_source in source_strings:
            # check if we can find `config.xxx`, `getattr(config, "xxx", ...)` or `getattr(self.config, "xxx", ...)`
            if (
                f'config.{attribute}' in modeling_source
                or f'getattr(config, "{attribute}"' in modeling_source
                or f'getattr(self.config, "{attribute}"' in modeling_source
            ):
                attribute_used = True
            # Deal with multi-line cases
            elif (
                re.search(
                    rf'getattr[ \t\v\n\r\f]*\([ \t\v\n\r\f]*(self\.)?config,[ \t\v\n\r\f]*"{attribute}"',
                    modeling_source,
                )
                is not None
            ):
                attribute_used = True
            # `SequenceSummary` is called with `SequenceSummary(config)`
            elif attribute in [
                'summary_type',
                'summary_use_proj',
                'summary_activation',
                'summary_last_dropout',
                'summary_proj_to_labels',
                'summary_first_dropout',
            ]:
                if 'SequenceSummary' in modeling_source:
                    attribute_used = True
            if attribute_used:
                break
        if attribute_used:
            break

    # common and important attributes, even if they do not always appear in the modeling files
    attributes_to_allow = [
        'bos_index',
        'eos_index',
        'pad_index',
        'unk_index',
        'mask_index',
        'image_size',
        'use_cache',
        'out_features',
        'out_indices',
    ]
    attributes_used_in_generation = ['encoder_no_repeat_ngram_size']

    # Special cases to be allowed
    case_allowed = True
    if not attribute_used:
        case_allowed = False
        for attribute in attributes:
            # Allow if the default value in the configuration class is different from the one in `PretrainedConfig`
            if attribute in ['is_encoder_decoder'] and default_value is True:
                case_allowed = True
            elif attribute in ['tie_word_embeddings'] and default_value is False:
                case_allowed = True
            # Allow cases without checking the default value in the configuration class
            elif attribute in attributes_to_allow + attributes_used_in_generation:
                case_allowed = True
            elif attribute.endswith('_token_id'):
                case_allowed = True

        # configuration class specific cases
        if not case_allowed:
            allowed_cases = SPECIAL_CASES_TO_ALLOW.get(config_class.__name__, [])
            case_allowed = allowed_cases is True or attribute in allowed_cases

    return attribute_used or case_allowed
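# For example, with attributes=["hidden_size"], any of the following occurrences in a
# modeling file mark the attribute as used (patterns taken from the checks above):
#   config.hidden_size
#   getattr(config, "hidden_size", 768)
#   getattr(self.config, "hidden_size")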
def check_config_attributes_being_used(config_class):
    # Get the parameters in `__init__` of the configuration class, and the default values if any
    signature = dict(inspect.signature(config_class.__init__).parameters)
    parameter_names = [x for x in list(signature.keys()) if x not in ['self', 'kwargs']]
    parameter_defaults = [signature[param].default for param in parameter_names]

    # If `attribute_map` exists, an attribute can have different names to be used in the modeling files, and as long
    # as one variant is used, the test should pass
    reversed_attribute_map = {}
    if len(config_class.attribute_map) > 0:
        reversed_attribute_map = {v: k for k, v in config_class.attribute_map.items()}

    # Get the path to modeling source files
    config_source_file = inspect.getsourcefile(config_class)
    model_dir = os.path.dirname(config_source_file)
    # Let's check against all frameworks: as long as one framework uses an attribute, we are good.
    modeling_paths = [os.path.join(model_dir, fn) for fn in os.listdir(model_dir) if fn.startswith('modeling_')]

    # Get the source code strings
    modeling_sources = []
    for path in modeling_paths:
        if os.path.isfile(path):
            with open(path) as fp:
                modeling_sources.append(fp.read())

    unused_attributes = []
    for config_param, default_value in zip(parameter_names, parameter_defaults):
        # `attributes` here is all the variant names for `config_param`
        attributes = [config_param]
        # some configuration classes have non-empty `attribute_map`, and both names could be used in the
        # corresponding modeling files. As long as one of them appears, it is fine.
        if config_param in reversed_attribute_map:
            attributes.append(reversed_attribute_map[config_param])

        if not check_attribute_being_used(config_class, attributes, default_value, modeling_sources):
            unused_attributes.append(attributes[0])

    return sorted(unused_attributes)
def check_config_attributes():
    configs_with_unused_attributes = {}
    for _config_class in list(CONFIG_MAPPING.values()):
        # Skip deprecated models
        if 'models.deprecated' in _config_class.__module__:
            continue
        # Some config classes are not in `CONFIG_MAPPING` (e.g. `CLIPVisionConfig`, `Blip2VisionConfig`, etc.)
        config_classes_in_module = [
            cls
            for name, cls in inspect.getmembers(
                inspect.getmodule(_config_class),
                lambda x: inspect.isclass(x)
                and issubclass(x, PretrainedConfig)
                and inspect.getmodule(x) == inspect.getmodule(_config_class),
            )
        ]
        for config_class in config_classes_in_module:
            unused_attributes = check_config_attributes_being_used(config_class)
            if len(unused_attributes) > 0:
                configs_with_unused_attributes[config_class.__name__] = unused_attributes

    if len(configs_with_unused_attributes) > 0:
        error = 'The following configuration classes contain unused attributes in the corresponding modeling files:\n'
        for name, attributes in configs_with_unused_attributes.items():
            error += f'{name}: {attributes}\n'
        raise ValueError(error)
if __name__ == "__main__":
check_config_attributes()
"""simple docstring"""
import json
import os
import tempfile
import unittest
import numpy as np
from datasets import load_dataset
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ImageGPTImageProcessor
class ImageGPTImageProcessingTester(unittest.TestCase):
    def __init__(
        self,
        parent,
        batch_size=7,
        num_channels=3,
        image_size=18,
        min_resolution=30,
        max_resolution=400,
        do_resize=True,
        size=None,
        do_normalize=True,
    ):
        size = size if size is not None else {'height': 18, 'width': 18}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_normalize = do_normalize
    def prepare_image_processor_dict(self):
return {
# here we create 2 clusters for the sake of simplicity
"clusters": np.asarray(
[
[0.88_66_44_36_34_03_32_03, 0.66_18_82_93_69_54_49_83, 0.38_91_74_64_01_78_68_04],
[-0.60_42_55_91_46_88_11_04, -0.0_22_95_00_88_60_52_84_69, 0.54_23_79_73_69_00_32_96],
] ),
"do_resize": self.do_resize,
"size": self.size,
"do_normalize": self.do_normalize,
}
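    # Note (illustrative, based on ImageGPT's color quantization): the processor assigns
    # each normalized pixel the id of its nearest cluster, i.e. argmin over squared color
    # distances, so the two clusters above turn every pixel into token 0 or 1.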
@require_torch
@require_vision
class ImageGPTImageProcessingTest(ImageProcessingSavingTestMixin, unittest.TestCase):
    image_processing_class = ImageGPTImageProcessor if is_vision_available() else None

    def setUp(self):
        self.image_processor_tester = ImageGPTImageProcessingTester(self)

    @property
    def image_processor_dict(self):
        return self.image_processor_tester.prepare_image_processor_dict()

    def test_image_processor_properties(self):
        image_processing = self.image_processing_class(**self.image_processor_dict)
        self.assertTrue(hasattr(image_processing, 'clusters'))
        self.assertTrue(hasattr(image_processing, 'do_resize'))
        self.assertTrue(hasattr(image_processing, 'size'))
        self.assertTrue(hasattr(image_processing, 'do_normalize'))

    def test_image_processor_from_dict_with_kwargs(self):
        image_processor = self.image_processing_class.from_dict(self.image_processor_dict)
        self.assertEqual(image_processor.size, {'height': 18, 'width': 18})

        image_processor = self.image_processing_class.from_dict(self.image_processor_dict, size=42)
        self.assertEqual(image_processor.size, {'height': 42, 'width': 42})

    def test_image_processor_to_json_string(self):
        image_processor = self.image_processing_class(**self.image_processor_dict)
        obj = json.loads(image_processor.to_json_string())
        for key, value in self.image_processor_dict.items():
            if key == 'clusters':
                self.assertTrue(np.array_equal(value, obj[key]))
            else:
                self.assertEqual(obj[key], value)

    def test_image_processor_to_json_file(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            json_file_path = os.path.join(tmpdirname, 'image_processor.json')
            image_processor_first.to_json_file(json_file_path)
            image_processor_second = self.image_processing_class.from_json_file(json_file_path).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == 'clusters':
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    def test_image_processor_from_and_save_pretrained(self):
        image_processor_first = self.image_processing_class(**self.image_processor_dict)

        with tempfile.TemporaryDirectory() as tmpdirname:
            image_processor_first.save_pretrained(tmpdirname)
            image_processor_second = self.image_processing_class.from_pretrained(tmpdirname).to_dict()

        image_processor_first = image_processor_first.to_dict()
        for key, value in image_processor_first.items():
            if key == 'clusters':
                self.assertTrue(np.array_equal(value, image_processor_second[key]))
            else:
                self.assertEqual(image_processor_first[key], value)

    @unittest.skip('ImageGPT requires clusters at initialization')
    def test_init_without_params(self):
        pass
def prepare_images():
    dataset = load_dataset('hf-internal-testing/fixtures_image_utils', split='test')

    image1 = Image.open(dataset[4]['file'])
    image2 = Image.open(dataset[5]['file'])

    images = [image1, image2]
    return images


@require_vision
@require_torch
class ImageGPTImageProcessorIntegrationTest(unittest.TestCase):
    @slow
    def test_image(self):
        image_processing = ImageGPTImageProcessor.from_pretrained('openai/imagegpt-small')
        images = prepare_images()

        # test non-batched
        encoding = image_processing(images[0], return_tensors='pt')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (1, 1024))
        expected_ids = [306, 191, 191]
        self.assertEqual(encoding.input_ids[0, :3].tolist(), expected_ids)

        # test batched
        encoding = image_processing(images, return_tensors='pt')
        self.assertIsInstance(encoding.input_ids, torch.LongTensor)
        self.assertEqual(encoding.input_ids.shape, (2, 1024))
        expected_ids = [303, 13, 13]
        self.assertEqual(encoding.input_ids[1, -3:].tolist(), expected_ids)
"""simple docstring"""
from jiwer import compute_measures
import datasets
_CITATION = '''\
@inproceedings{inproceedings,
author = {Morris, Andrew and Maier, Viktoria and Green, Phil},
year = {2004},
month = {01},
pages = {},
title = {From WER and RIL to MER and WIL: improved evaluation measures for connected speech recognition.}
}
'''
_DESCRIPTION = '''\
Word error rate (WER) is a common metric of the performance of an automatic speech recognition system.
The general difficulty of measuring performance lies in the fact that the recognized word sequence can have a different length from the reference word sequence (supposedly the correct one). The WER is derived from the Levenshtein distance, working at the word level instead of the phoneme level. The WER is a valuable tool for comparing different systems as well as for evaluating improvements within one system. This kind of measurement, however, provides no details on the nature of translation errors and further work is therefore required to identify the main source(s) of error and to focus any research effort.
This problem is solved by first aligning the recognized word sequence with the reference (spoken) word sequence using dynamic string alignment. Examination of this issue is seen through a theory called the power law that states the correlation between perplexity and word error rate.
Word error rate can then be computed as:
WER = (S + D + I) / N = (S + D + I) / (S + D + C)
where
S is the number of substitutions,
D is the number of deletions,
I is the number of insertions,
C is the number of correct words,
N is the number of words in the reference (N=S+D+C).
This value indicates the average number of errors per reference word. The lower the value, the better the
performance of the ASR system with a WER of 0 being a perfect score.
'''
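# Worked example of the formula above: reference "the cat sat on the mat" (N=6) vs.
# hypothesis "the cat sit on mat" gives S=1 (sat->sit), D=1 (one "the" dropped), I=0,
# so WER = (1 + 1 + 0) / 6 ≈ 0.33.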
_KWARGS_DESCRIPTION = '''
Compute WER score of transcribed segments against references.
Args:
references: List of references for each speech input.
predictions: List of transcriptions to score.
concatenate_texts (bool, default=False): Whether to concatenate all input texts or compute WER iteratively.
Returns:
(float): the word error rate
Examples:
>>> predictions = ["this is the prediction", "there is an other sample"]
>>> references = ["this is the reference", "there is another one"]
>>> wer = datasets.load_metric("wer")
>>> wer_score = wer.compute(predictions=predictions, references=references)
>>> print(wer_score)
0.5
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class WER(datasets.Metric):
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION ,citation=_CITATION ,inputs_description=_KWARGS_DESCRIPTION ,features=datasets.Features(
{
'predictions': datasets.Value('string' ,id='sequence' ),
'references': datasets.Value('string' ,id='sequence' ),
} ) ,codebase_urls=['https://github.com/jitsi/jiwer/'] ,reference_urls=[
'https://en.wikipedia.org/wiki/Word_error_rate',
] ,)
    def _compute(self, predictions=None, references=None, concatenate_texts=False):
        if concatenate_texts:
            return compute_measures(references, predictions)['wer']
        else:
            incorrect = 0
            total = 0
            for prediction, reference in zip(predictions, references):
                measures = compute_measures(reference, prediction)
                incorrect += measures['substitutions'] + measures['deletions'] + measures['insertions']
                total += measures['substitutions'] + measures['deletions'] + measures['hits']
            return incorrect / total
"""simple docstring"""
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def download_command_factory(args):
    return DownloadCommand(args.model, args.cache_dir, args.force, args.trust_remote_code)


class DownloadCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser('download')
        download_parser.add_argument(
            '--cache-dir', type=str, default=None, help='Path to location to store the models'
        )
        download_parser.add_argument(
            '--force', action='store_true', help='Force the model to be download even if already in cache-dir'
        )
        download_parser.add_argument(
            '--trust-remote-code',
            action='store_true',
            help='Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you\'ve reviewed the code as it will execute on your local machine',
        )
        download_parser.add_argument('model', type=str, help='Name of the model to download')
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, model: str, cache: str, force: bool, trust_remote_code: bool):
        self._model = model
        self._cache = cache
        self._force = force
        self._trust_remote_code = trust_remote_code

    def run(self):
        from ..models.auto import AutoModel, AutoTokenizer

        AutoModel.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
        AutoTokenizer.from_pretrained(
            self._model, cache_dir=self._cache, force_download=self._force, trust_remote_code=self._trust_remote_code
        )
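# Typical CLI usage (illustrative model name):
#   transformers-cli download bert-base-uncased --cache-dir ./models --force
# which resolves to the AutoModel/AutoTokenizer `from_pretrained` calls in `run` above.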
"""simple docstring"""
import collections
import gzip
import os
import urllib
import numpy
from tensorflow.python.framework import dtypes, random_seed
from tensorflow.python.platform import gfile
from tensorflow.python.util.deprecation import deprecated
_Datasets = collections.namedtuple('_Datasets', ['train', 'validation', 'test'])

# CVDF mirror of http://yann.lecun.com/exdb/mnist/
DEFAULT_SOURCE_URL = 'https://storage.googleapis.com/cvdf-datasets/mnist/'


def _readaa(bytestream):
    # Reads one big-endian uint32 from the stream (the upstream helper is `_read32`).
    dt = numpy.dtype(numpy.uint32).newbyteorder('>')
    return numpy.frombuffer(bytestream.read(4), dtype=dt)[0]
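# e.g. the four bytes b'\x00\x00\x08\x03' decode to 2051, the magic number of an MNIST
# images file (big-endian uint32), which is exactly what `_extract_images` checks below.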
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_images(f):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2051:
            raise ValueError(
                'Invalid magic number %d in MNIST image file: %s' % (magic, f.name)
            )
        num_images = _readaa(bytestream)
        rows = _readaa(bytestream)
        cols = _readaa(bytestream)
        buf = bytestream.read(rows * cols * num_images)
        data = numpy.frombuffer(buf, dtype=numpy.uint8)
        data = data.reshape(num_images, rows, cols, 1)
        return data
@deprecated(None, 'Please use tf.one_hot on tensors.')
def _dense_to_one_hot(labels_dense, num_classes):
    num_labels = labels_dense.shape[0]
    index_offset = numpy.arange(num_labels) * num_classes
    labels_one_hot = numpy.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot
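# e.g. _dense_to_one_hot(numpy.array([1, 0]), num_classes=3) returns
#   array([[0., 1., 0.],
#          [1., 0., 0.]])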
@deprecated(None, 'Please use tf.data to implement this functionality.')
def _extract_labels(f, one_hot=False, num_classes=10):
    print('Extracting', f.name)
    with gzip.GzipFile(fileobj=f) as bytestream:
        magic = _readaa(bytestream)
        if magic != 2049:
            raise ValueError(
                'Invalid magic number %d in MNIST label file: %s' % (magic, f.name)
            )
        num_items = _readaa(bytestream)
        buf = bytestream.read(num_items)
        labels = numpy.frombuffer(buf, dtype=numpy.uint8)
        if one_hot:
            return _dense_to_one_hot(labels, num_classes)
        return labels
class lowerCAmelCase_ :
'''simple docstring'''
@deprecated(
snake_case_ ,'Please use alternatives such as official/mnist/_DataSet.py'
' from tensorflow/models.' ,)
def __init__( self : Optional[int] ,A_ : Union[str, Any] ,A_ : str ,A_ : Dict=False ,A_ : Union[str, Any]=False ,A_ : Optional[Any]=dtypes.floataa ,A_ : Optional[int]=True ,A_ : int=None ,) -> Optional[Any]:
A = random_seed.get_seed(snake_case_ )
# If op level seed is not set, use whatever graph level seed is returned
numpy.random.seed(seeda if seed is None else seeda )
A = dtypes.as_dtype(snake_case_ ).base_dtype
if dtype not in (dtypes.uinta, dtypes.floataa):
raise TypeError('Invalid image dtype %r, expected uint8 or float32' % dtype )
if fake_data:
A = 1_0000
A = one_hot
else:
assert (
images.shape[0] == labels.shape[0]
), F'images.shape: {images.shape} labels.shape: {labels.shape}'
A = images.shape[0]
# Convert shape from [num examples, rows, columns, depth]
# to [num examples, rows*columns] (assuming depth == 1)
if reshape:
assert images.shape[3] == 1
A = images.reshape(
images.shape[0] ,images.shape[1] * images.shape[2] )
if dtype == dtypes.floataa:
# Convert from [0, 255] -> [0.0, 1.0].
A = images.astype(numpy.floataa )
A = numpy.multiply(snake_case_ ,1.0 / 2_55.0 )
A = images
A = labels
A = 0
A = 0
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Optional[Any]:
return self._images
@property
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
return self._labels
@property
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
return self._num_examples
@property
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> int:
return self._epochs_completed
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : str ,A_ : List[Any]=False ,A_ : Union[str, Any]=True ) -> List[str]:
if fake_data:
A = [1] * 784
A = [1] + [0] * 9 if self.one_hot else 0
return (
[fake_image for _ in range(snake_case_ )],
[fake_label for _ in range(snake_case_ )],
)
A = self._index_in_epoch
# Shuffle for the first epoch
if self._epochs_completed == 0 and start == 0 and shuffle:
A = numpy.arange(self._num_examples )
numpy.random.shuffle(snake_case_ )
A = self.images[perma]
A = self.labels[perma]
# Go to the next epoch
if start + batch_size > self._num_examples:
# Finished epoch
self._epochs_completed += 1
# Get the rest examples in this epoch
A = self._num_examples - start
A = self._images[start : self._num_examples]
A = self._labels[start : self._num_examples]
# Shuffle the data
if shuffle:
A = numpy.arange(self._num_examples )
numpy.random.shuffle(snake_case_ )
A = self.images[perm]
A = self.labels[perm]
# Start next epoch
A = 0
A = batch_size - rest_num_examples
A = self._index_in_epoch
A = self._images[start:end]
A = self._labels[start:end]
return (
numpy.concatenate((images_rest_part, images_new_part) ,axis=0 ),
numpy.concatenate((labels_rest_part, labels_new_part) ,axis=0 ),
)
else:
self._index_in_epoch += batch_size
A = self._index_in_epoch
return self._images[start:end], self._labels[start:end]
@deprecated(snake_case__ , 'Please write your own downloading logic.' )
def _snake_case ( snake_case__ : List[Any] , snake_case__ : int , snake_case__ : Any ):
if not gfile.Exists(snake_case__ ):
gfile.MakeDirs(snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
if not gfile.Exists(snake_case__ ):
urllib.request.urlretrieve(snake_case__ , snake_case__ ) # noqa: S310
with gfile.GFile(snake_case__ ) as f:
A = f.size()
print('Successfully downloaded' , snake_case__ , snake_case__ , 'bytes.' )
return filepath
@deprecated(
snake_case__ , 'Please use alternatives such as:' ' tensorflow_datasets.load(\'mnist\')' )
def _snake_case ( snake_case__ : Optional[Any] , snake_case__ : Optional[Any]=False , snake_case__ : Tuple=False , snake_case__ : str=dtypes.floataa , snake_case__ : Union[str, Any]=True , snake_case__ : Union[str, Any]=5000 , snake_case__ : Union[str, Any]=None , snake_case__ : Optional[int]=DEFAULT_SOURCE_URL , ):
if fake_data:
def fake():
return _DataSet(
[] , [] , fake_data=snake_case__ , one_hot=snake_case__ , dtype=snake_case__ , seed=snake_case__ )
A = fake()
A = fake()
A = fake()
return _Datasets(train=snake_case__ , validation=snake_case__ , test=snake_case__ )
if not source_url: # empty string check
A = DEFAULT_SOURCE_URL
A = "train-images-idx3-ubyte.gz"
A = "train-labels-idx1-ubyte.gz"
A = "t10k-images-idx3-ubyte.gz"
A = "t10k-labels-idx1-ubyte.gz"
A = _maybe_download(
snake_case__ , snake_case__ , source_url + train_images_file )
with gfile.Open(snake_case__ , 'rb' ) as f:
A = _extract_images(snake_case__ )
A = _maybe_download(
snake_case__ , snake_case__ , source_url + train_labels_file )
with gfile.Open(snake_case__ , 'rb' ) as f:
A = _extract_labels(snake_case__ , one_hot=snake_case__ )
A = _maybe_download(
snake_case__ , snake_case__ , source_url + test_images_file )
with gfile.Open(snake_case__ , 'rb' ) as f:
A = _extract_images(snake_case__ )
A = _maybe_download(
snake_case__ , snake_case__ , source_url + test_labels_file )
with gfile.Open(snake_case__ , 'rb' ) as f:
A = _extract_labels(snake_case__ , one_hot=snake_case__ )
if not 0 <= validation_size <= len(snake_case__ ):
A = (
"Validation size should be between 0 and "
F'{len(snake_case__ )}. Received: {validation_size}.'
)
raise ValueError(snake_case__ )
A = train_images[:validation_size]
A = train_labels[:validation_size]
A = train_images[validation_size:]
A = train_labels[validation_size:]
A = {"dtype": dtype, "reshape": reshape, "seed": seed}
A = _DataSet(snake_case__ , snake_case__ , **snake_case__ )
A = _DataSet(snake_case__ , snake_case__ , **snake_case__ )
A = _DataSet(snake_case__ , snake_case__ , **snake_case__ )
return _Datasets(train=snake_case__ , validation=snake_case__ , test=snake_case__ ) | 720 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {'''vocab_file''': '''spm_char.model'''}
_lowercase = {
'''vocab_file''': {
'''microsoft/speecht5_asr''': '''https://huggingface.co/microsoft/speecht5_asr/resolve/main/spm_char.model''',
'''microsoft/speecht5_tts''': '''https://huggingface.co/microsoft/speecht5_tts/resolve/main/spm_char.model''',
'''microsoft/speecht5_vc''': '''https://huggingface.co/microsoft/speecht5_vc/resolve/main/spm_char.model''',
}
}
_lowercase = {
'''microsoft/speecht5_asr''': 10_24,
'''microsoft/speecht5_tts''': 10_24,
'''microsoft/speecht5_vc''': 10_24,
}
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = VOCAB_FILES_NAMES
_lowerCamelCase: List[Any] = PRETRAINED_VOCAB_FILES_MAP
_lowerCamelCase: str = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
_lowerCamelCase: Tuple = ['''input_ids''', '''attention_mask''']
def __init__( self : List[str] ,A_ : int ,A_ : List[str]="<s>" ,A_ : Optional[Any]="</s>" ,A_ : Optional[Any]="<unk>" ,A_ : str="<pad>" ,A_ : Optional[Dict[str, Any]] = None ,**A_ : List[str] ,) -> None:
A = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=A_ ,eos_token=A_ ,unk_token=A_ ,pad_token=A_ ,sp_model_kwargs=self.sp_model_kwargs ,**A_ ,)
A = vocab_file
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(A_ )
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> str:
return self.sp_model.get_piece_size()
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[Any]:
A = {self.convert_ids_to_tokens(A_ ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : str ) -> Any:
A = self.__dict__.copy()
A = None
return state
def __setstate__( self : Optional[int] ,A_ : str ) -> Tuple:
A = d
# for backward compatibility
if not hasattr(self ,'sp_model_kwargs' ):
A = {}
A = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ) -> List[str]:
return self.sp_model.encode(A_ ,out_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any] ) -> Union[str, Any]:
return self.sp_model.piece_to_id(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict ) -> List[Any]:
A = self.sp_model.IdToPiece(A_ )
return token
def _SCREAMING_SNAKE_CASE ( self : Tuple ,A_ : Optional[Any] ) -> List[str]:
A = []
A = ''
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(A_ ) + token
A = []
else:
current_sub_tokens.append(A_ )
out_string += self.sp_model.decode(A_ )
return out_string.strip()
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Dict ,A_ : Optional[int]=None ) -> List[int]:
if token_ids_a is None:
return token_ids_a + [self.eos_token_id]
# We don't expect to process pairs, but leave the pair logic for API consistency
return token_ids_a + token_ids_a + [self.eos_token_id]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[int] ,A_ : Optional[List[int]] = None ,A_ : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=A_ ,token_ids_a=A_ ,already_has_special_tokens=A_ )
A = [1]
if token_ids_a is None:
return ([0] * len(A_ )) + suffix_ones
return ([0] * len(A_ )) + ([0] * len(A_ )) + suffix_ones
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : str ,A_ : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(A_ ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
A = os.path.join(
A_ ,(filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(A_ ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,A_ )
elif not os.path.isfile(self.vocab_file ):
with open(A_ ,'wb' ) as fi:
A = self.sp_model.serialized_model_proto()
fi.write(A_ )
return (out_vocab_file,) | 22 | 0 |
"""simple docstring"""
import gc
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
StableDiffusionAttendAndExcitePipeline,
UNetaDConditionModel,
)
from diffusers.utils import load_numpy, skip_mps, slow
from diffusers.utils.testing_utils import require_torch_gpu
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_IMAGE_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineKarrasSchedulerTesterMixin, PipelineLatentTesterMixin, PipelineTesterMixin
_lowercase = False
@skip_mps
class __lowercase ( lowercase__ , lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = StableDiffusionAttendAndExcitePipeline
_lowerCamelCase: List[Any] = False
_lowerCamelCase: Tuple = TEXT_TO_IMAGE_PARAMS
_lowerCamelCase: Optional[int] = TEXT_TO_IMAGE_BATCH_PARAMS.union({'''token_indices'''} )
_lowerCamelCase: str = TEXT_TO_IMAGE_IMAGE_PARAMS
_lowerCamelCase: Optional[Any] = TEXT_TO_IMAGE_IMAGE_PARAMS
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Any ) -> Dict:
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[int] ) -> Optional[Any]:
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
torch.manual_seed(0 )
A = UNetaDConditionModel(
block_out_channels=(32, 64) ,layers_per_block=1 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') ,up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') ,cross_attention_dim=32 ,attention_head_dim=(2, 4) ,use_linear_projection=UpperCAmelCase__ ,)
A = DDIMScheduler(
beta_start=0.0_00_85 ,beta_end=0.0_12 ,beta_schedule='scaled_linear' ,clip_sample=UpperCAmelCase__ ,set_alpha_to_one=UpperCAmelCase__ ,)
torch.manual_seed(0 )
A = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] ,up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
A = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act='gelu' ,projection_dim=512 ,)
A = CLIPTextModel(UpperCAmelCase__ )
A = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
A = {
'''unet''': unet,
'''scheduler''': scheduler,
'''vae''': vae,
'''text_encoder''': text_encoder,
'''tokenizer''': tokenizer,
'''safety_checker''': None,
'''feature_extractor''': None,
}
return components
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : List[Any] ,A_ : Optional[int]=0 ) -> Dict:
if str(UpperCAmelCase__ ).startswith('mps' ):
A = torch.manual_seed(UpperCAmelCase__ )
else:
A = torch.Generator(device=UpperCAmelCase__ ).manual_seed(UpperCAmelCase__ )
A = {
'''prompt''': '''a cat and a frog''',
'''token_indices''': [2, 5],
'''generator''': generator,
'''num_inference_steps''': 1,
'''guidance_scale''': 6.0,
'''output_type''': '''numpy''',
'''max_iter_to_alter''': 2,
'''thresholds''': {0: 0.7},
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = '''cpu'''
A = self.get_dummy_components()
A = self.pipeline_class(**UpperCAmelCase__ )
pipe.to(UpperCAmelCase__ )
pipe.set_progress_bar_config(disable=UpperCAmelCase__ )
A = self.get_dummy_inputs(UpperCAmelCase__ )
A = pipe(**UpperCAmelCase__ ).images
A = image[0, -3:, -3:, -1]
self.assertEqual(image.shape ,(1, 64, 64, 3) )
A = np.array(
[0.63_90_53_64, 0.62_89_73_07, 0.48_59_90_17, 0.5_13_36_24, 0.5_55_00_48, 0.45_76_95_16, 0.50_32_69_73, 0.5_02_31_39, 0.45_38_44_96] )
A = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(UpperCAmelCase__ ,1e-3 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
super().test_cpu_offload_forward_pass(expected_max_diff=5e-4 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
self._test_inference_batch_consistent(batch_sizes=[1, 2] )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
self._test_inference_batch_single_identical(batch_size=2 ,expected_max_diff=7e-4 )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Union[str, Any]:
super().test_dict_tuple_outputs_equivalent(expected_max_difference=3e-3 )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
super().test_pt_np_pil_outputs_equivalent(expected_max_diff=5e-4 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[str]:
super().test_save_load_local(expected_max_difference=5e-4 )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Any:
super().test_save_load_optional_components(expected_max_difference=4e-4 )
@require_torch_gpu
@slow
class __lowercase ( unittest.TestCase ):
'''simple docstring'''
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Tuple ) -> int:
super().setUpClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
@classmethod
def _SCREAMING_SNAKE_CASE ( cls : Optional[Any] ) -> List[str]:
super().tearDownClass()
torch.use_deterministic_algorithms(UpperCAmelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[Any]:
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[str]:
A = torch.manual_seed(51 )
A = StableDiffusionAttendAndExcitePipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,safety_checker=UpperCAmelCase__ ,torch_dtype=torch.floataa )
pipe.to('cuda' )
A = '''a painting of an elephant with glasses'''
A = [5, 7]
A = pipe(
prompt=UpperCAmelCase__ ,token_indices=UpperCAmelCase__ ,guidance_scale=7.5 ,generator=UpperCAmelCase__ ,num_inference_steps=5 ,max_iter_to_alter=5 ,output_type='numpy' ,).images[0]
A = load_numpy(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/attend-and-excite/elephant_glasses.npy' )
assert np.abs((expected_image - image).max() ) < 5e-1 | 721 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''CLIPFeatureExtractor''']
_lowercase = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
_lowercase = logging.get_logger(__name__)
_lowercase = {
'''facebook/s2t-small-librispeech-asr''': (
'''https://huggingface.co/facebook/s2t-small-librispeech-asr/resolve/main/config.json'''
),
# See all Speech2Text models at https://huggingface.co/models?filter=speech_to_text
}
class lowerCAmelCase_ ( __UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = "speech_to_text"
_lowerCamelCase: List[str] = ["past_key_values"]
_lowerCamelCase: Tuple = {"num_attention_heads": "encoder_attention_heads", "hidden_size": "d_model"}
def __init__( self : List[str] ,A_ : Optional[Any]=1_0000 ,A_ : List[Any]=12 ,A_ : List[str]=2048 ,A_ : int=4 ,A_ : str=6 ,A_ : Union[str, Any]=2048 ,A_ : Optional[Any]=4 ,A_ : Dict=0.0 ,A_ : Tuple=0.0 ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : List[Any]="relu" ,A_ : Any=256 ,A_ : int=0.1 ,A_ : Any=0.0 ,A_ : Optional[Any]=0.0 ,A_ : int=0.02 ,A_ : Optional[int]=2 ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=1 ,A_ : Optional[int]=0 ,A_ : int=2 ,A_ : Tuple=6000 ,A_ : Optional[int]=1024 ,A_ : List[str]=2 ,A_ : List[Any]=(5, 5) ,A_ : List[Any]=1024 ,A_ : Union[str, Any]=80 ,A_ : Tuple=1 ,**A_ : Union[str, Any] ,) -> Optional[int]:
A = vocab_size
A = d_model
A = encoder_ffn_dim
A = encoder_layers
A = encoder_attention_heads
A = decoder_ffn_dim
A = decoder_layers
A = decoder_attention_heads
A = dropout
A = attention_dropout
A = activation_dropout
A = activation_function
A = init_std
A = encoder_layerdrop
A = decoder_layerdrop
A = use_cache
A = encoder_layers
A = scale_embedding # scale factor will be sqrt(d_model) if True
A = max_source_positions
A = max_target_positions
A = num_conv_layers
A = list(_lowerCamelCase )
A = conv_channels
A = input_feat_per_channel
A = input_channels
if len(self.conv_kernel_sizes ) != self.num_conv_layers:
raise ValueError(
'Configuration for convolutional module is incorrect. '
'It is required that `len(config.conv_kernel_sizes)` == `config.num_conv_layers` '
F'but is `len(config.conv_kernel_sizes) = {len(self.conv_kernel_sizes )}`, '
F'`config.num_conv_layers = {self.num_conv_layers}`.' )
super().__init__(
pad_token_id=_lowerCamelCase ,bos_token_id=_lowerCamelCase ,eos_token_id=_lowerCamelCase ,is_encoder_decoder=_lowerCamelCase ,decoder_start_token_id=_lowerCamelCase ,**_lowerCamelCase ,) | 700 |
"""simple docstring"""
import unittest
from transformers import XLMConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
XLMForMultipleChoice,
XLMForQuestionAnswering,
XLMForQuestionAnsweringSimple,
XLMForSequenceClassification,
XLMForTokenClassification,
XLMModel,
XLMWithLMHeadModel,
)
from transformers.models.xlm.modeling_xlm import XLM_PRETRAINED_MODEL_ARCHIVE_LIST
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Any ,A_ : Optional[Any] ,A_ : Tuple=13 ,A_ : Optional[Any]=7 ,A_ : Dict=True ,A_ : Optional[Any]=True ,A_ : str=True ,A_ : Union[str, Any]=True ,A_ : Optional[Any]=True ,A_ : Tuple=False ,A_ : Optional[int]=False ,A_ : str=False ,A_ : int=2 ,A_ : Union[str, Any]=99 ,A_ : int=0 ,A_ : Dict=32 ,A_ : List[str]=5 ,A_ : Any=4 ,A_ : str=0.1 ,A_ : Any=0.1 ,A_ : int=512 ,A_ : List[Any]=2 ,A_ : Union[str, Any]=0.02 ,A_ : Optional[Any]=2 ,A_ : List[str]=4 ,A_ : Optional[int]="last" ,A_ : str=True ,A_ : List[str]=None ,A_ : List[Any]=0 ,) -> int:
A = parent
A = batch_size
A = seq_length
A = is_training
A = use_input_lengths
A = use_token_type_ids
A = use_labels
A = gelu_activation
A = sinusoidal_embeddings
A = causal
A = asm
A = n_langs
A = vocab_size
A = n_special
A = hidden_size
A = num_hidden_layers
A = num_attention_heads
A = hidden_dropout_prob
A = attention_probs_dropout_prob
A = max_position_embeddings
A = type_sequence_label_size
A = initializer_range
A = num_labels
A = num_choices
A = summary_type
A = use_proj
A = scope
A = bos_token_id
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Dict:
A = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A = random_attention_mask([self.batch_size, self.seq_length] )
A = None
if self.use_input_lengths:
A = (
ids_tensor([self.batch_size] ,vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
A = None
if self.use_token_type_ids:
A = ids_tensor([self.batch_size, self.seq_length] ,self.n_langs )
A = None
A = None
A = None
if self.use_labels:
A = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A = ids_tensor([self.batch_size] ,2 ).float()
A = ids_tensor([self.batch_size] ,self.num_choices )
A = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Any:
return XLMConfig(
vocab_size=self.vocab_size ,n_special=self.n_special ,emb_dim=self.hidden_size ,n_layers=self.num_hidden_layers ,n_heads=self.num_attention_heads ,dropout=self.hidden_dropout_prob ,attention_dropout=self.attention_probs_dropout_prob ,gelu_activation=self.gelu_activation ,sinusoidal_embeddings=self.sinusoidal_embeddings ,asm=self.asm ,causal=self.causal ,n_langs=self.n_langs ,max_position_embeddings=self.max_position_embeddings ,initializer_range=self.initializer_range ,summary_type=self.summary_type ,use_proj=self.use_proj ,num_labels=self.num_labels ,bos_token_id=self.bos_token_id ,)
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : int ,A_ : Dict ,A_ : Optional[Any] ,A_ : Optional[Any] ,A_ : Any ,A_ : List[str] ,A_ : Optional[int] ,) -> Tuple:
A = XLMModel(config=A_ )
model.to(A_ )
model.eval()
A = model(A_ ,lengths=A_ ,langs=A_ )
A = model(A_ ,langs=A_ )
A = model(A_ )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : str ,A_ : Union[str, Any] ,A_ : Any ,A_ : Any ,A_ : Any ,A_ : Union[str, Any] ,A_ : List[str] ,A_ : List[str] ,A_ : List[str] ,) -> Union[str, Any]:
A = XLMWithLMHeadModel(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,token_type_ids=A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ,A_ : Tuple ,A_ : str ,A_ : int ,A_ : str ,A_ : Optional[Any] ,A_ : Any ,A_ : Any ,A_ : Dict ,) -> List[str]:
A = XLMForQuestionAnsweringSimple(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
A = outputs
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[int] ,A_ : Dict ,A_ : Optional[Any] ,A_ : List[Any] ,A_ : List[str] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : str ,A_ : Any ,) -> Optional[int]:
A = XLMForQuestionAnswering(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,p_mask=A_ ,)
A = model(
A_ ,start_positions=A_ ,end_positions=A_ ,cls_index=A_ ,is_impossible=A_ ,)
((A) , ) = result_with_labels.to_tuple()
A = model(A_ ,start_positions=A_ ,end_positions=A_ )
((A) , ) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape ,() )
self.parent.assertEqual(result.start_top_log_probs.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape ,(self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape ,(self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape ,(self.batch_size,) )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Union[str, Any] ,A_ : Tuple ,A_ : int ,A_ : Union[str, Any] ,A_ : List[Any] ,A_ : Optional[Any] ,A_ : Tuple ,A_ : Union[str, Any] ,A_ : str ,) -> List[Any]:
A = XLMForSequenceClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ )
A = model(A_ ,labels=A_ )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.type_sequence_label_size) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ,A_ : str ,A_ : Tuple ,A_ : List[str] ,A_ : Dict ,A_ : Dict ,A_ : Union[str, Any] ,A_ : Dict ,A_ : Any ,) -> Any:
A = self.num_labels
A = XLMForTokenClassification(A_ )
model.to(A_ )
model.eval()
A = model(A_ ,attention_mask=A_ ,labels=A_ )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ,A_ : str ,A_ : int ,A_ : Tuple ,A_ : List[Any] ,A_ : List[str] ,A_ : List[str] ,A_ : Optional[Any] ,A_ : int ,) -> Tuple:
A = self.num_choices
A = XLMForMultipleChoice(config=A_ )
model.to(A_ )
model.eval()
A = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A = model(
A_ ,attention_mask=A_ ,token_type_ids=A_ ,labels=A_ ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.prepare_config_and_inputs()
(
(
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) , (
A
) ,
) = config_and_inputs
A = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'lengths': input_lengths}
return config, inputs_dict
@require_torch
class lowerCAmelCase_ ( _lowercase , _lowercase , _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Dict = (
(
XLMModel,
XLMWithLMHeadModel,
XLMForQuestionAnswering,
XLMForSequenceClassification,
XLMForQuestionAnsweringSimple,
XLMForTokenClassification,
XLMForMultipleChoice,
)
if is_torch_available()
else ()
)
_lowerCamelCase: Dict = (
(XLMWithLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Check other models whether language generation is also applicable
_lowerCamelCase: Optional[Any] = (
{
'''feature-extraction''': XLMModel,
'''fill-mask''': XLMWithLMHeadModel,
'''question-answering''': XLMForQuestionAnsweringSimple,
'''text-classification''': XLMForSequenceClassification,
'''text-generation''': XLMWithLMHeadModel,
'''token-classification''': XLMForTokenClassification,
'''zero-shot''': XLMForSequenceClassification,
}
if is_torch_available()
else {}
)
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : Any ,A_ : str ) -> Tuple:
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith('Fast' )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[int] ,A_ : Tuple ,A_ : str=False ) -> Dict:
A = super()._prepare_for_class(A_ ,A_ ,return_labels=A_ )
if return_labels:
if model_class.__name__ == "XLMForQuestionAnswering":
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
A = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=A_ )
return inputs_dict
def _SCREAMING_SNAKE_CASE ( self : int ) -> List[str]:
A = XLMModelTester(self )
A = ConfigTester(self ,config_class=A_ ,emb_dim=37 )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> str:
self.config_tester.run_common_tests()
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_model(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_lm_head(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_simple_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_qa(*A_ )
def _SCREAMING_SNAKE_CASE ( self : str ) -> List[Any]:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_sequence_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> Tuple:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_token_classif(*A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Dict:
A = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_xlm_for_multiple_choice(*A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Dict ,A_ : List[Any] ,A_ : List[Any] ,A_ : Dict ,A_ : Union[str, Any] ,A_ : List[Any]=False ,A_ : Tuple=1 ) -> List[Any]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_attentions in attentions] ,[True] * len(A_ ) )
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_attentions in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = min_length + idx + 1
A = (
batch_size * num_beam_groups,
config.num_attention_heads,
tgt_len,
src_len,
)
# check attn size
self.assertListEqual(
[layer_attention.shape for layer_attention in iter_attentions] ,[expected_shape] * len(A_ ) )
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Optional[Any] ,A_ : List[str] ,A_ : Dict ,A_ : Optional[Any] ,A_ : Dict ,A_ : Tuple=False ,A_ : Optional[Any]=1 ) -> List[str]:
self.assertIsInstance(A_ ,A_ )
self.assertListEqual(
[isinstance(A_ ,A_ ) for iter_hidden_states in hidden_states] ,[True] * len(A_ ) ,)
self.assertEqual(len(A_ ) ,(max_length - min_length) * num_beam_groups )
for idx, iter_hidden_states in enumerate(A_ ):
# adds PAD dummy token
A = min_length + idx + 1
A = (batch_size * num_beam_groups, seq_len, config.hidden_size)
# check hidden size
self.assertListEqual(
[layer_hidden_states.shape for layer_hidden_states in iter_hidden_states] ,[expected_shape] * len(A_ ) ,)
pass
@slow
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
for model_name in XLM_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A = XLMModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
@require_torch
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@slow
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = XLMWithLMHeadModel.from_pretrained('xlm-mlm-en-2048' )
model.to(A_ )
A = torch.tensor([[14, 447]] ,dtype=torch.long ,device=A_ ) # the president
A = [
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
14,
447,
] # the president the president the president the president the president the president the president the president the president the president
# TODO(PVP): this and other input_ids I tried for generation give pretty bad results. Not sure why. Model might just not be made for auto-regressive inference
A = model.generate(A_ ,do_sample=A_ )
self.assertListEqual(output_ids[0].cpu().numpy().tolist() ,A_ ) | 22 | 0 |
"""simple docstring"""
from __future__ import annotations

from collections.abc import Iterable, Iterator
from dataclasses import dataclass

test_data_odd = (3, 9, -11, 0, 7, 5, 1, -1)
test_data_even = (4, 6, 2, 0, 8, 10, 3, -2)


@dataclass
class Node:
    data: int
    next_node: Node | None


class SortedLinkedList:
    def __init__(self, ints: Iterable[int]) -> None:
        self.head: Node | None = None
        # Prepend the values in descending order so the list ends up ascending.
        for i in sorted(ints, reverse=True):
            self.head = Node(i, self.head)

    def __iter__(self) -> Iterator[int]:
        node = self.head
        while node:
            yield node.data
            node = node.next_node

    def __len__(self) -> int:
        return sum(1 for _ in self)

    def __str__(self) -> str:
        return " -> ".join([str(node) for node in self])


def merge_lists(sll_one: SortedLinkedList, sll_two: SortedLinkedList) -> SortedLinkedList:
    return SortedLinkedList(list(sll_one) + list(sll_two))
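
# Illustrative check (not part of the original script): merging preserves sorted
# order, so the result equals sorting the concatenated test tuples. The helper
# name is hypothetical; call it by hand.
def _demo_merge() -> None:
    merged = merge_lists(SortedLinkedList(test_data_odd), SortedLinkedList(test_data_even))
    assert list(merged) == sorted(test_data_odd + test_data_even)
    print(merged)  # -11 -> -2 -> -1 -> 0 -> 0 -> 1 -> 2 -> 3 -> 3 -> 4 -> 5 -> 6 -> 7 -> 8 -> 9 -> 10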
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    SSL = SortedLinkedList
    print(merge_lists(SSL(test_data_odd), SSL(test_data_even)))
"""simple docstring"""
from typing import Optional, Tuple, Union
import torch
from einops import rearrange, reduce
from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
BITS = 8


# Bit conversions adapted from https://github.com/lucidrains/bit-diffusion
def decimal_to_bits(x, bits=BITS):
    """Expects an image tensor in [0, 1]; returns a bit tensor with values in {-1, 1}."""
    device = x.device

    x = (x * 255).int().clamp(0, 255)

    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b c h w -> b c 1 h w")

    bits = ((x & mask) != 0).float()
    bits = rearrange(bits, "b c d h w -> b (c d) h w")
    bits = bits * 2 - 1
    return bits


def bits_to_decimal(x, bits=BITS):
    """Expects a bit tensor with values in {-1, 1}; returns an image tensor in [0, 1]."""
    device = x.device

    x = (x > 0).int()
    mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)

    mask = rearrange(mask, "d -> d 1 1")
    x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
    dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
    return (dec / 255).clamp(0.0, 1.0)
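
# Sanity sketch (illustrative helper, not part of the original pipeline): the
# two conversions above are inverses of each other up to 8-bit quantization.
def _check_bit_round_trip():
    x = torch.rand(2, 3, 16, 16)  # image tensor in [0, 1]
    bits = decimal_to_bits(x)  # -> (2, 24, 16, 16), values in {-1, 1}
    recovered = bits_to_decimal(bits)
    # decimal_to_bits quantizes to integers 0..255, so compare against the
    # quantized input rather than x itself.
    assert torch.allclose(recovered, (x * 255).int().float() / 255, atol=1e-6)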
def ddim_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    eta: float = 0.0,
    use_clipped_model_output: bool = True,
    generator=None,
    return_dict: bool = True,
) -> Union[DDIMSchedulerOutput, Tuple]:
    if self.num_inference_steps is None:
        raise ValueError(
            "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
        )

    # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
    # Ideally, read DDIM paper in-detail understanding
    # Notation (<variable name> -> <name in paper>)
    # - pred_noise_t -> e_theta(x_t, t)
    # - pred_original_sample -> f_theta(x_t, t) or x_0
    # - std_dev_t -> sigma_t
    # - eta -> η
    # - pred_sample_direction -> "direction pointing to x_t"
    # - pred_prev_sample -> "x_t-1"

    # 1. get previous step value (=t-1)
    prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps

    # 2. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[timestep]
    alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod

    beta_prod_t = 1 - alpha_prod_t

    # 3. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5

    # 4. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 5. compute variance: "sigma_t(η)" -> see formula (16)
    # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
    variance = self._get_variance(timestep, prev_timestep)
    std_dev_t = eta * variance**0.5

    if use_clipped_model_output:
        # the model_output is always re-derived from the clipped x_0 in Glide
        model_output = (sample - alpha_prod_t**0.5 * pred_original_sample) / beta_prod_t**0.5

    # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output

    # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
    prev_sample = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction

    if eta > 0:
        # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
        device = model_output.device if torch.is_tensor(model_output) else "cpu"
        noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
        variance = self._get_variance(timestep, prev_timestep) ** 0.5 * eta * noise

        prev_sample = prev_sample + variance

    if not return_dict:
        return (prev_sample,)

    return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
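
# For reference (added annotation, not in the original pipeline): with
# x_0_pred = pred_original_sample and eps = model_output, the update above is
#
#   x_{t-1} = sqrt(alpha_{t-1}) * x_0_pred
#             + sqrt(1 - alpha_{t-1} - sigma_t^2) * eps
#             + sigma_t * z,        z ~ N(0, I)
#
#   sigma_t = eta * sqrt((1 - alpha_{t-1}) / (1 - alpha_t)) * sqrt(1 - alpha_t / alpha_{t-1})
#
# i.e. formula (12) of the DDIM paper; eta = 0 gives the deterministic sampler.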
def ddpm_bit_scheduler_step(
    self,
    model_output: torch.FloatTensor,
    timestep: int,
    sample: torch.FloatTensor,
    prediction_type="epsilon",
    generator=None,
    return_dict: bool = True,
) -> Union[DDPMSchedulerOutput, Tuple]:
    t = timestep

    if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
        model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
    else:
        predicted_variance = None

    # 1. compute alphas, betas
    alpha_prod_t = self.alphas_cumprod[t]
    alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
    beta_prod_t = 1 - alpha_prod_t
    beta_prod_t_prev = 1 - alpha_prod_t_prev

    # 2. compute predicted original sample from predicted noise also called
    # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf
    if prediction_type == "epsilon":
        pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5
    elif prediction_type == "sample":
        pred_original_sample = model_output
    else:
        raise ValueError(f"Unsupported prediction_type {prediction_type}.")

    # 3. Clip "predicted x_0"
    scale = self.bit_scale
    if self.config.clip_sample:
        pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)

    # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_original_sample_coeff = (alpha_prod_t_prev**0.5 * self.betas[t]) / beta_prod_t
    current_sample_coeff = self.alphas[t] ** 0.5 * beta_prod_t_prev / beta_prod_t

    # 5. Compute predicted previous sample µ_t
    # See formula (7) from https://arxiv.org/pdf/2006.11239.pdf
    pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample

    # 6. Add noise
    variance = 0
    if t > 0:
        noise = torch.randn(
            model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
        ).to(model_output.device)
        variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise

    pred_prev_sample = pred_prev_sample + variance

    if not return_dict:
        return (pred_prev_sample,)

    return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
class BitDiffusion(DiffusionPipeline):
    def __init__(
        self,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, DDPMScheduler],
        bit_scale: Optional[float] = 1.0,
    ):
        super().__init__()
        self.bit_scale = bit_scale
        # Bind the bit-aware step function to the scheduler instance so that
        # `self.scheduler.step(model_output, t, sample)` receives the scheduler
        # as `self`; the step functions also read `bit_scale` from the scheduler.
        scheduler.bit_scale = bit_scale
        scheduler.step = (
            ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
        ).__get__(scheduler)

        self.register_modules(unet=unet, scheduler=scheduler)

    @torch.no_grad()
    def __call__(
        self,
        height: Optional[int] = 256,
        width: Optional[int] = 256,
        num_inference_steps: Optional[int] = 50,
        generator: Optional[torch.Generator] = None,
        batch_size: Optional[int] = 1,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        **kwargs,
    ) -> Union[Tuple, ImagePipelineOutput]:
        latents = torch.randn(
            (batch_size, self.unet.config.in_channels, height, width),
            generator=generator,
        )
        latents = decimal_to_bits(latents) * self.bit_scale
        latents = latents.to(self.device)

        self.scheduler.set_timesteps(num_inference_steps)

        for t in self.progress_bar(self.scheduler.timesteps):
            # predict the noise residual
            noise_pred = self.unet(latents, t).sample

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents).prev_sample

        image = bits_to_decimal(latents)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
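
# Usage sketch (illustrative only; the checkpoint identifier is a placeholder,
# not from the original file — a UNet trained on bit representations is required):
#
#   unet = UNet2DConditionModel.from_pretrained("<bit-diffusion-checkpoint>", subfolder="unet")
#   pipe = BitDiffusion(unet, DDIMScheduler(), bit_scale=1.0)
#   image = pipe(height=64, width=64, num_inference_steps=50).images[0]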
"""simple docstring"""
from __future__ import annotations
class BoyerMooreSearch:
    def __init__(self, text: str, pattern: str):
        self.text, self.pattern = text, pattern
        self.textLen, self.patLen = len(text), len(pattern)

    def match_in_pattern(self, char: str) -> int:
        """Returns the rightmost index of `char` in the pattern, or -1 if absent."""
        for i in range(self.patLen - 1, -1, -1):
            if char == self.pattern[i]:
                return i
        return -1

    def mismatch_in_text(self, current_pos: int) -> int:
        """Returns the text index of the rightmost mismatched character, or -1 on a full match."""
        for i in range(self.patLen - 1, -1, -1):
            if self.pattern[i] != self.text[current_pos + i]:
                return current_pos + i
        return -1

    def bad_character_heuristic(self) -> list[int]:
        # searches pattern in text and returns index positions
        positions = []
        for i in range(self.textLen - self.patLen + 1):
            mismatch_index = self.mismatch_in_text(i)
            if mismatch_index == -1:
                positions.append(i)
            else:
                match_index = self.match_in_pattern(self.text[mismatch_index])
                i = (
                    mismatch_index - match_index
                )  # shifting index lgtm [py/multiple-definition]
        return positions
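
# Cross-check sketch (illustrative helper, not part of the original script): the
# positions reported by the bad-character heuristic should match a naive scan,
# e.g. _naive_find_all("ABAABA", "AB") == [0, 3]
#      == BoyerMooreSearch("ABAABA", "AB").bad_character_heuristic()
def _naive_find_all(text: str, pattern: str) -> list[int]:
    return [i for i in range(len(text) - len(pattern) + 1) if text[i : i + len(pattern)] == pattern]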
text = "ABAABA"
pattern = "AB"

bms = BoyerMooreSearch(text, pattern)
positions = bms.bad_character_heuristic()

if len(positions) == 0:
    print("No match found")
else:
    print("Pattern found in following positions: ")
    print(positions)
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

YOLOS_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "hustvl/yolos-small": "https://huggingface.co/hustvl/yolos-small/resolve/main/config.json",
    # See all YOLOS models at https://huggingface.co/models?filter=yolos
}
class YolosConfig(PretrainedConfig):
    model_type = "yolos"

    def __init__(
        self,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3072,
        hidden_act="gelu",
        hidden_dropout_prob=0.0,
        attention_probs_dropout_prob=0.0,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        image_size=[512, 864],
        patch_size=16,
        num_channels=3,
        qkv_bias=True,
        num_detection_tokens=100,
        use_mid_position_embeddings=True,
        auxiliary_loss=False,
        class_cost=1,
        bbox_cost=5,
        giou_cost=2,
        bbox_loss_coefficient=5,
        giou_loss_coefficient=2,
        eos_coefficient=0.1,
        **kwargs,
    ):
        super().__init__(**kwargs)

        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        self.num_detection_tokens = num_detection_tokens
        self.use_mid_position_embeddings = use_mid_position_embeddings
        self.auxiliary_loss = auxiliary_loss
        # Hungarian matcher
        self.class_cost = class_cost
        self.bbox_cost = bbox_cost
        self.giou_cost = giou_cost
        # Loss coefficients
        self.bbox_loss_coefficient = bbox_loss_coefficient
        self.giou_loss_coefficient = giou_loss_coefficient
        self.eos_coefficient = eos_coefficient


class YolosOnnxConfig(OnnxConfig):
    torch_onnx_minimum_version = version.parse("1.11")

    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    @property
    def default_onnx_opset(self) -> int:
        return 12
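
# Usage sketch (illustrative, not part of the original file):
#
#   config = YolosConfig()          # ViT-Base-like defaults from __init__ above
#   config.num_detection_tokens     # -> 100 learnable detection tokens
#   onnx_config = YolosOnnxConfig(config)
#   list(onnx_config.inputs)        # -> ["pixel_values"]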
"""simple docstring"""
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    """Abstract base class for all constraints that can be applied during generation."""

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        """Runs a sanity self-check on the constraint definition."""
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()

            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    "Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true."
                )

            stepped, completed, reset = self.update(advance)
            counter += 1

            if counter > 10000:
                raise Exception("update() does not fulfill the constraint.")

        if self.remaining() != 0:
            raise Exception("Custom Constraint is not defined correctly.")

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f"{self.__class__} is an abstract class. Only classes inheriting this class can be called."
        )
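
# Illustrative sketch (not part of the original module): the smallest concrete
# Constraint one could write — it forces a single fixed token. The class name is
# hypothetical. Note the base-class self-check `test()` steps the constraint,
# so we reset afterwards.
class _SingleTokenConstraint(Constraint):
    def __init__(self, token_id: int):
        self.token_id = token_id
        self.completed = False
        self.seqlen = 1
        super().__init__()  # Constraint.__init__ runs self.test()
        self.reset()  # test() fulfils the constraint, so restore a fresh state

    def advance(self):
        return None if self.completed else self.token_id

    def does_advance(self, token_id: int):
        return not self.completed and token_id == self.token_id

    def update(self, token_id: int):
        if self.does_advance(token_id):
            self.completed = True
            return True, True, False  # stepped, completed, reset
        self.reset()
        return False, False, True

    def reset(self):
        self.completed = False

    def remaining(self):
        return 0 if self.completed else 1

    def copy(self, stateful=False):
        new_constraint = _SingleTokenConstraint(self.token_id)
        if stateful:
            new_constraint.completed = self.completed
        return new_constraint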
class PhrasalConstraint(Constraint):
    """Forces the generated output to contain the ordered sequence `token_ids`."""

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()

        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f"`token_ids` has to be a non-empty list, but is {token_ids}.")
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f"Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.")

        self.token_ids = token_ids

        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        if self.completed:
            return False

        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed

        return new_constraint
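
# Walk-through sketch (illustrative, not part of the original module):
#
#   c = PhrasalConstraint([5, 9, 3])
#   c.advance()    # -> 5: the only token that makes progress next
#   c.update(5)    # -> (True, False, False): stepped, not yet completed
#   c.update(9)    # -> (True, False, False)
#   c.update(3)    # -> (True, True, False): phrase fulfilled, c.completed is True
#   c.update(7)    # -> (False, False, True): no progress possible, state is reset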
class DisjunctiveTrie:
    """A simple trie over several token sequences, tracking which continuations are valid."""

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                "Each list in `nested_token_ids` can't be a complete subset of another list, but is"
                f" {nested_token_ids}."
            )

        self.trie = root

    def next_tokens(self, current_seq):
        """The next possible tokens that will progress the trie, given the tokens in `current_seq`."""
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]

        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        """Returns True if some sequence is a complete prefix of another (leaf count != word count)."""
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
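
# Quick sketch (illustrative helper, not part of the original module): the trie
# indexes every branch of the disjunction.
def _demo_disjunctive_trie():
    trie = DisjunctiveTrie([[1, 2, 3], [1, 2, 4], [1, 5]])
    assert trie.max_height == 3
    assert sorted(trie.next_tokens([1])) == [2, 5]
    assert trie.reached_leaf([1, 5])
    assert trie.count_leaves(trie.trie) == 3  # one leaf per branch, so no subsets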
class DisjunctiveConstraint(Constraint):
    """A special Constraint that is fulfilled by completing any one of several token sequences."""

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()

        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f"`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.")
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f"`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.")
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids
        ):
            raise ValueError(
                f"Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}."
            )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids

        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        next_tokens = self.trie.next_tokens(self.current_seq)

        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f"`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}")

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed

        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)

        if stateful:
            new_constraint.seqlen = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed

        return new_constraint
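
# Walk-through sketch (illustrative, not part of the original module): the
# constraint is fulfilled by completing any one branch.
#
#   c = DisjunctiveConstraint([[1, 2, 3], [1, 4]])
#   c.advance()    # -> [1]: both branches start with token 1
#   c.update(1)    # -> (True, False, False); c.advance() is now [2, 4]
#   c.update(4)    # -> (True, True, False): branch [1, 4] completed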
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ,A_ : Optional[Any] ) -> Union[str, Any]:
A = constraints
# max # of steps required to fulfill a given constraint
A = max([c.seqlen for c in constraints] )
A = len(lowercase_ )
A = False
self.init_state()
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> List[str]:
A = []
A = None
A = [constraint.copy(stateful=lowercase_ ) for constraint in self.constraints]
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> List[Any]:
A = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Optional[int]:
A = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
A = constraint.advance()
if isinstance(lowercase_ ,lowercase_ ):
token_list.append(lowercase_ )
elif isinstance(lowercase_ ,lowercase_ ):
token_list.extend(lowercase_ )
else:
A = self.inprogress_constraint.advance()
if isinstance(lowercase_ ,lowercase_ ):
token_list.append(lowercase_ )
elif isinstance(lowercase_ ,lowercase_ ):
token_list.extend(lowercase_ )
if len(lowercase_ ) == 0:
return None
else:
return token_list
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any] ) -> Union[str, Any]:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
A = self.add(lowercase_ )
# the entire list of constraints are fulfilled
if self.completed:
break
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : int ) -> Optional[int]:
if not isinstance(lowercase_ ,lowercase_ ):
raise ValueError(F'`token_id` should be an `int`, but is `{token_id}`.' )
A = False, False
if self.completed:
A = True
A = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
A = self.inprogress_constraint.update(lowercase_ )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                # But that doesn't mean we call self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=lowercase_ ) )
A = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
A = None
if len(self.pending_constraints ) == 0:
# we're done!
A = True
else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` help us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(lowercase_ ):
A = pending_constraint.update(lowercase_ )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(lowercase_ )
A = None
if not complete and stepped:
A = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
A = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
                    # If there are no pending constraints left after this and no in-progress one either, then we must be
# complete.
A = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : Dict=True ) -> Optional[int]:
        A = ConstraintListState(self.constraints ) # we never mutate the self.constraints objects
        # throughout this process, so they remain in their initialization state.
if stateful:
A = [
constraint.copy(stateful=lowercase_ ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
A = self.inprogress_constraint.copy(stateful=lowercase_ )
A = [constraint.copy() for constraint in self.pending_constraints]
return new_state
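# A minimal usage sketch of the two classes above. They mirror transformers'
# DisjunctiveConstraint / ConstraintListState, so the stock imports below are an
# assumption standing in for the renamed definitions in this dump.
from transformers.generation.beam_constraints import ConstraintListState, DisjunctiveConstraint

constraint = DisjunctiveConstraint([[1, 2, 3], [1, 4]])  # either branch satisfies it
state = ConstraintListState([constraint])
complete, stepped = state.add(1)  # shared prefix: steps the constraint, not yet complete
complete, stepped = state.add(4)  # reaches the leaf of the [1, 4] branch
assert complete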
| 703 |
"""simple docstring"""
import random
import unittest
import numpy as np
from diffusers import (
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
OnnxStableDiffusionImgaImgPipeline,
PNDMScheduler,
)
from diffusers.utils import floats_tensor
from diffusers.utils.testing_utils import (
is_onnx_available,
load_image,
nightly,
require_onnxruntime,
require_torch_gpu,
)
from ..test_pipelines_onnx_common import OnnxPipelineTesterMixin
if is_onnx_available():
import onnxruntime as ort
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Tuple = '''hf-internal-testing/tiny-random-OnnxStableDiffusionPipeline'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=0 ) -> Dict:
A = floats_tensor((1, 3, 128, 128) ,rng=random.Random(A_ ) )
A = np.random.RandomState(A_ )
A = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'generator': generator,
'num_inference_steps': 3,
'strength': 0.75,
'guidance_scale': 7.5,
'output_type': 'numpy',
}
return inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1].flatten()
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_96_43, 0.5_84_84, 0.5_03_14, 0.5_87_60, 0.5_53_68, 0.5_96_43, 0.5_15_29, 0.4_12_17, 0.4_90_87] )
assert np.abs(image_slice - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Union[str, Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = PNDMScheduler.from_config(pipe.scheduler.config ,skip_prk_steps=A_ )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_17_37, 0.5_46_42, 0.5_31_83, 0.5_44_65, 0.5_27_42, 0.6_05_25, 0.4_99_69, 0.4_06_55, 0.4_81_54] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Dict:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = LMSDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
# warmup pass to apply optimizations
A = pipe(**self.get_dummy_inputs() )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_27_61, 0.5_99_77, 0.4_90_33, 0.4_96_19, 0.5_42_82, 0.5_03_11, 0.4_76_00, 0.4_09_18, 0.4_52_03] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.5_29_11, 0.6_00_04, 0.4_92_29, 0.4_98_05, 0.5_45_02, 0.5_06_80, 0.4_77_77, 0.4_10_28, 0.4_53_04] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
def _SCREAMING_SNAKE_CASE ( self : str ) -> Optional[Any]:
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(self.hub_checkpoint ,provider='CPUExecutionProvider' )
A = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
pipe.set_progress_bar_config(disable=A_ )
A = self.get_dummy_inputs()
A = pipe(**A_ ).images
A = image[0, -3:, -3:, -1]
assert image.shape == (1, 128, 128, 3)
A = np.array([0.6_53_31, 0.5_82_77, 0.4_82_04, 0.5_60_59, 0.5_36_65, 0.5_62_35, 0.5_09_69, 0.4_00_09, 0.4_65_52] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
@nightly
@require_onnxruntime
@require_torch_gpu
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
@property
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Union[str, Any]:
return (
"CUDAExecutionProvider",
{
"gpu_mem_limit": "15000000000", # 15GB
"arena_extend_strategy": "kSameAsRequested",
},
)
@property
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> str:
A = ort.SessionOptions()
A = False
return options
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
# using the PNDM scheduler by default
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'CompVis/stable-diffusion-v1-4' ,revision='onnx' ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=10 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.49_09, 0.50_59, 0.53_72, 0.46_23, 0.48_76, 0.50_49, 0.48_20, 0.49_56, 0.50_19] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Any:
A = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/img2img/sketch-mountains-input.jpg' )
A = init_image.resize((768, 512) )
A = LMSDiscreteScheduler.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,subfolder='scheduler' ,revision='onnx' )
A = OnnxStableDiffusionImgaImgPipeline.from_pretrained(
'runwayml/stable-diffusion-v1-5' ,revision='onnx' ,scheduler=A_ ,safety_checker=A_ ,feature_extractor=A_ ,provider=self.gpu_provider ,sess_options=self.gpu_options ,)
pipe.set_progress_bar_config(disable=A_ )
A = 'A fantasy landscape, trending on artstation'
A = np.random.RandomState(0 )
A = pipe(
prompt=A_ ,image=A_ ,strength=0.75 ,guidance_scale=7.5 ,num_inference_steps=20 ,generator=A_ ,output_type='np' ,)
A = output.images
A = images[0, 255:258, 383:386, -1]
assert images.shape == (1, 512, 768, 3)
A = np.array([0.80_43, 0.9_26, 0.95_81, 0.81_19, 0.89_54, 0.9_13, 0.72_09, 0.74_63, 0.74_31] )
# TODO: lower the tolerance after finding the cause of onnxruntime reproducibility issues
assert np.abs(image_slice.flatten() - expected_slice ).max() < 2e-2 | 22 | 0 |
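# A minimal sketch of the pipeline under test, outside the unittest harness.
# Assumes diffusers' OnnxStableDiffusionImg2ImgPipeline (spelled
# OnnxStableDiffusionImgaImgPipeline by the renaming above) plus onnxruntime
# with a CPU provider; checkpoint and image URL are taken from the nightly test.
import numpy as np
from diffusers import OnnxStableDiffusionImg2ImgPipeline
from diffusers.utils import load_image

pipe = OnnxStableDiffusionImg2ImgPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5", revision="onnx", provider="CPUExecutionProvider"
)
init_image = load_image(
    "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
    "/img2img/sketch-mountains-input.jpg"
).resize((768, 512))
output = pipe(
    prompt="A fantasy landscape, trending on artstation",
    image=init_image,
    strength=0.75,
    num_inference_steps=10,
    generator=np.random.RandomState(0),
    output_type="np",
)
print(output.images.shape)  # (1, 512, 768, 3), as asserted in the tests above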
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, apply_forward_hook
from .modeling_utils import ModelMixin
from .vae import Decoder, DecoderOutput, Encoder, VectorQuantizer
@dataclass
class lowerCAmelCase_ ( _UpperCAmelCase ):
'''simple docstring'''
_lowerCamelCase: torch.FloatTensor
class lowerCAmelCase_ ( _UpperCAmelCase , _UpperCAmelCase ):
'''simple docstring'''
@register_to_config
def __init__( self : str ,A_ : int = 3 ,A_ : int = 3 ,A_ : Tuple[str] = ("DownEncoderBlock2D",) ,A_ : Tuple[str] = ("UpDecoderBlock2D",) ,A_ : Tuple[int] = (64,) ,A_ : int = 1 ,A_ : str = "silu" ,A_ : int = 3 ,A_ : int = 32 ,A_ : int = 256 ,A_ : int = 32 ,A_ : Optional[int] = None ,A_ : float = 0.1_82_15 ,A_ : str = "group" ,) -> List[str]:
super().__init__()
# pass init params to Encoder
A = Encoder(
in_channels=A_ ,out_channels=A_ ,down_block_types=A_ ,block_out_channels=A_ ,layers_per_block=A_ ,act_fn=A_ ,norm_num_groups=A_ ,double_z=A_ ,)
A = vq_embed_dim if vq_embed_dim is not None else latent_channels
A = nn.Convad(A_ ,A_ ,1 )
A = VectorQuantizer(A_ ,A_ ,beta=0.25 ,remap=A_ ,sane_index_shape=A_ )
A = nn.Convad(A_ ,A_ ,1 )
# pass init params to Decoder
A = Decoder(
in_channels=A_ ,out_channels=A_ ,up_block_types=A_ ,block_out_channels=A_ ,layers_per_block=A_ ,act_fn=A_ ,norm_num_groups=A_ ,norm_type=A_ ,)
@apply_forward_hook
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : torch.FloatTensor ,A_ : bool = True ) -> VQEncoderOutput:
A = self.encoder(A_ )
A = self.quant_conv(A_ )
if not return_dict:
return (h,)
return VQEncoderOutput(latents=A_ )
@apply_forward_hook
def _SCREAMING_SNAKE_CASE ( self : str ,A_ : torch.FloatTensor ,A_ : bool = False ,A_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
# also go through quantization layer
if not force_not_quantize:
A , A , A = self.quantize(A_ )
else:
A = h
A = self.post_quant_conv(A_ )
A = self.decoder(A_ ,quant if self.config.norm_type == 'spatial' else None )
if not return_dict:
return (dec,)
return DecoderOutput(sample=A_ )
def _SCREAMING_SNAKE_CASE ( self : int ,A_ : torch.FloatTensor ,A_ : bool = True ) -> Union[DecoderOutput, torch.FloatTensor]:
A = sample
A = self.encode(A_ ).latents
A = self.decode(A_ ).sample
if not return_dict:
return (dec,)
return DecoderOutput(sample=A_ ) | 704 |
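# A minimal round-trip sketch. The class above mirrors diffusers' VQModel
# (encode -> quant_conv, then quantize -> post_quant_conv -> decode); using
# the stock class with its default tiny config is an assumption.
import torch
from diffusers import VQModel

model = VQModel()
x = torch.randn(1, 3, 32, 32)
latents = model.encode(x).latents     # encoder output after quant_conv
recon = model.decode(latents).sample  # quantized, then decoded
assert recon.shape == x.shape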
"""simple docstring"""
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
_lowercase = logging.get_logger(__name__)
class lowerCAmelCase_ ( _lowercase ):
'''simple docstring'''
_lowerCamelCase: Dict = ['''pixel_values''']
def __init__( self : Optional[Any] ,A_ : bool = True ,A_ : Optional[Dict[str, int]] = None ,A_ : PILImageResampling = PILImageResampling.BILINEAR ,A_ : bool = True ,A_ : Dict[str, int] = None ,A_ : bool = True ,A_ : Union[int, float] = 1 / 255 ,A_ : bool = True ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,**A_ : Optional[Any] ,) -> None:
super().__init__(**A_ )
A = size if size is not None else {'shortest_edge': 256}
A = get_size_dict(A_ ,default_to_square=A_ )
A = crop_size if crop_size is not None else {'height': 224, 'width': 224}
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_resize
A = size
A = resample
A = do_center_crop
A = crop_size
A = do_rescale
A = rescale_factor
A = do_normalize
A = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
A = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : PILImageResampling = PILImageResampling.BICUBIC ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ ,default_to_square=A_ )
if "shortest_edge" not in size:
raise ValueError(F'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A = get_resize_output_image_size(A_ ,size=size['shortest_edge'] ,default_to_square=A_ )
return resize(A_ ,size=A_ ,resample=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : Dict[str, int] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : int ,) -> np.ndarray:
A = get_size_dict(A_ )
if "height" not in size or "width" not in size:
raise ValueError(F'The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}' )
return center_crop(A_ ,size=(size['height'], size['width']) ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : np.ndarray ,A_ : float ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : List[str] ) -> np.ndarray:
return rescale(A_ ,scale=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : np.ndarray ,A_ : Union[float, List[float]] ,A_ : Union[float, List[float]] ,A_ : Optional[Union[str, ChannelDimension]] = None ,**A_ : Any ,) -> np.ndarray:
return normalize(A_ ,mean=A_ ,std=A_ ,data_format=A_ ,**A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : ImageInput ,A_ : Optional[bool] = None ,A_ : Dict[str, int] = None ,A_ : PILImageResampling = None ,A_ : bool = None ,A_ : Dict[str, int] = None ,A_ : Optional[bool] = None ,A_ : Optional[float] = None ,A_ : Optional[bool] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[float, List[float]]] = None ,A_ : Optional[Union[str, TensorType]] = None ,A_ : Union[str, ChannelDimension] = ChannelDimension.FIRST ,**A_ : Tuple ,) -> List[Any]:
A = do_resize if do_resize is not None else self.do_resize
A = size if size is not None else self.size
A = get_size_dict(A_ ,default_to_square=A_ )
A = resample if resample is not None else self.resample
A = do_center_crop if do_center_crop is not None else self.do_center_crop
A = crop_size if crop_size is not None else self.crop_size
A = get_size_dict(A_ ,param_name='crop_size' )
A = do_rescale if do_rescale is not None else self.do_rescale
A = rescale_factor if rescale_factor is not None else self.rescale_factor
A = do_normalize if do_normalize is not None else self.do_normalize
A = image_mean if image_mean is not None else self.image_mean
A = image_std if image_std is not None else self.image_std
A = make_list_of_images(A_ )
if not valid_images(A_ ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and size is None:
raise ValueError('Size must be specified if do_resize is True.' )
if do_center_crop and crop_size is None:
raise ValueError('Crop size must be specified if do_center_crop is True.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
A = [to_numpy_array(A_ ) for image in images]
if do_resize:
A = [self.resize(image=A_ ,size=A_ ,resample=A_ ) for image in images]
if do_center_crop:
A = [self.center_crop(image=A_ ,size=A_ ) for image in images]
if do_rescale:
A = [self.rescale(image=A_ ,scale=A_ ) for image in images]
if do_normalize:
A = [self.normalize(image=A_ ,mean=A_ ,std=A_ ) for image in images]
A = [to_channel_dimension_format(A_ ,A_ ) for image in images]
A = {'pixel_values': images}
return BatchFeature(data=A_ ,tensor_type=A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any] ,A_ : List[Tuple] = None ) -> str:
A = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(A_ ) != len(A_ ):
raise ValueError(
'Make sure that you pass in as many target sizes as the batch dimension of the logits' )
if is_torch_tensor(A_ ):
A = target_sizes.numpy()
A = []
for idx in range(len(A_ ) ):
A = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) ,size=target_sizes[idx] ,mode='bilinear' ,align_corners=A_ )
A = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(A_ )
else:
A = logits.argmax(dim=1 )
A = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 22 | 0 |
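# A minimal preprocessing sketch. The processor above follows the standard
# transformers image-processor flow (resize to shortest_edge=256, center-crop
# to 224x224, rescale, normalize); MobileNetV2ImageProcessor is assumed here
# as a stand-in with matching defaults.
import numpy as np
from PIL import Image
from transformers import MobileNetV2ImageProcessor

processor = MobileNetV2ImageProcessor()
image = Image.fromarray(np.zeros((300, 400, 3), dtype=np.uint8))
batch = processor(images=image, return_tensors="pt")
print(batch["pixel_values"].shape)  # torch.Size([1, 3, 224, 224])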
"""simple docstring"""
from math import pow, sqrt
def _snake_case ( *snake_case__ : float ):
A = len(_lowerCamelCase ) > 0 and all(value > 0.0 for value in values )
return result
def _snake_case ( snake_case__ : float , snake_case__ : float ):
return (
round(sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowerCamelCase , _lowerCamelCase )
        else ValueError('Input Error: Molar mass values must be greater than 0.' )
)
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
return (
round(effusion_rate * sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
return (
round(effusion_rate / sqrt(molar_mass_a / molar_mass_a ) , 6 )
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
return (
round(molar_mass / pow(effusion_rate_a / effusion_rate_a , 2 ) , 6 )
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
)
def _snake_case ( snake_case__ : float , snake_case__ : float , snake_case__ : float ):
return (
round(pow(effusion_rate_a / effusion_rate_a , 2 ) / molar_mass , 6 )
if validate(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else ValueError(
            'Input Error: Molar mass and effusion rate values must be greater than 0.' )
) | 705 |
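# A worked example of Graham's law, rate_1 / rate_2 = sqrt(M_2 / M_1),
# computed directly because the helpers above all share the renamed
# identifier `_snake_case` and shadow one another.
from math import sqrt

molar_mass_h2, molar_mass_o2 = 2.016, 31.998  # H2 and O2, g/mol
print(round(sqrt(molar_mass_o2 / molar_mass_h2), 6))  # ~3.98: H2 effuses ~4x faster than O2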
"""simple docstring"""
import argparse
import os
import pickle
import sys
import torch
from transformers import TransfoXLConfig, TransfoXLLMHeadModel, load_tf_weights_in_transfo_xl
from transformers.models.transfo_xl import tokenization_transfo_xl as data_utils
from transformers.models.transfo_xl.tokenization_transfo_xl import CORPUS_NAME, VOCAB_FILES_NAMES
from transformers.utils import CONFIG_NAME, WEIGHTS_NAME, logging
logging.set_verbosity_info()
# We do this to be able to load python 2 datasets pickles
# See e.g. https://stackoverflow.com/questions/2121874/python-pickling-after-changing-a-modules-directory/2121918#2121918
_lowercase = data_utils.TransfoXLTokenizer
_lowercase = data_utils.TransfoXLCorpus
_lowercase = data_utils
_lowercase = data_utils
def _snake_case ( snake_case__ : Union[str, Any] , snake_case__ : Dict , snake_case__ : Tuple , snake_case__ : int ):
if transfo_xl_dataset_file:
# Convert a pre-processed corpus (see original TensorFlow repo)
with open(snake_case__ , 'rb' ) as fp:
A = pickle.load(snake_case__ , encoding='latin1' )
# Save vocabulary and dataset cache as Dictionaries (should be better than pickles for the long-term)
A = pytorch_dump_folder_path + '/' + VOCAB_FILES_NAMES['pretrained_vocab_file']
print(F'Save vocabulary to {pytorch_vocab_dump_path}' )
A = corpus.vocab.__dict__
torch.save(snake_case__ , snake_case__ )
A = corpus.__dict__
corpus_dict_no_vocab.pop('vocab' , snake_case__ )
A = pytorch_dump_folder_path + '/' + CORPUS_NAME
print(F'Save dataset to {pytorch_dataset_dump_path}' )
torch.save(snake_case__ , snake_case__ )
if tf_checkpoint_path:
# Convert a pre-trained TensorFlow model
A = os.path.abspath(snake_case__ )
A = os.path.abspath(snake_case__ )
print(F'Converting Transformer XL checkpoint from {tf_path} with config at {config_path}.' )
# Initialise PyTorch model
if transfo_xl_config_file == "":
A = TransfoXLConfig()
else:
A = TransfoXLConfig.from_json_file(snake_case__ )
print(F'Building PyTorch model from configuration: {config}' )
A = TransfoXLLMHeadModel(snake_case__ )
A = load_tf_weights_in_transfo_xl(snake_case__ , snake_case__ , snake_case__ )
# Save pytorch-model
A = os.path.join(snake_case__ , snake_case__ )
A = os.path.join(snake_case__ , snake_case__ )
print(F'Save PyTorch model to {os.path.abspath(snake_case__ )}' )
torch.save(model.state_dict() , snake_case__ )
print(F'Save configuration file to {os.path.abspath(snake_case__ )}' )
with open(snake_case__ , 'w' , encoding='utf-8' ) as f:
f.write(config.to_json_string() )
if __name__ == "__main__":
_lowercase = argparse.ArgumentParser()
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=True,
help='''Path to the folder to store the PyTorch model or dataset/vocab.''',
)
parser.add_argument(
'''--tf_checkpoint_path''',
default='''''',
type=str,
help='''An optional path to a TensorFlow checkpoint path to be converted.''',
)
parser.add_argument(
'''--transfo_xl_config_file''',
default='''''',
type=str,
help=(
            '''An optional config json file corresponding to the pre-trained Transformer-XL model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--transfo_xl_dataset_file''',
default='''''',
type=str,
help='''An optional dataset file to be converted in a vocabulary.''',
)
_lowercase = parser.parse_args()
convert_transfo_xl_checkpoint_to_pytorch(
args.tf_checkpoint_path,
args.transfo_xl_config_file,
args.pytorch_dump_folder_path,
args.transfo_xl_dataset_file,
) | 22 | 0 |
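# A sketch of how the converter above is invoked (script and path names are
# placeholders, not taken from this dump):
#
#   python convert_transfo_xl_checkpoint_to_pytorch.py \
#       --pytorch_dump_folder_path ./transfo-xl-pt \
#       --tf_checkpoint_path ./tf_ckpt/model.ckpt \
#       --transfo_xl_config_file ./tf_ckpt/config.json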
"""simple docstring"""
import os
import unittest
from transformers import BertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
BertTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class lowerCAmelCase_ ( _lowercase , unittest.TestCase ):
'''simple docstring'''
_lowerCamelCase: Optional[Any] = BertTokenizer
_lowerCamelCase: str = BertTokenizerFast
_lowerCamelCase: List[str] = True
_lowerCamelCase: str = True
_lowerCamelCase: Optional[int] = filter_non_english
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Tuple:
super().setUp()
A = [
'[UNK]',
'[CLS]',
'[SEP]',
'[PAD]',
'[MASK]',
'want',
'##want',
'##ed',
'wa',
'un',
'runn',
'##ing',
',',
'low',
'lowest',
]
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as vocab_writer:
vocab_writer.write(''.join([x + '\n' for x in vocab_tokens] ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any] ) -> Optional[int]:
A = 'UNwant\u00E9d,running'
A = 'unwanted, running'
return input_text, output_text
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Optional[int]:
A = self.tokenizer_class(self.vocab_file )
A = tokenizer.tokenize('UNwant\u00E9d,running' )
self.assertListEqual(__UpperCamelCase ,['un', '##want', '##ed', ',', 'runn', '##ing'] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) ,[9, 6, 7, 12, 10, 11] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> int:
if not self.test_rust_tokenizer:
return
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = 'UNwant\u00E9d,running'
A = tokenizer.tokenize(__UpperCamelCase )
A = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
A = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
A = rust_tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
A = self.get_rust_tokenizer()
A = tokenizer.encode(__UpperCamelCase )
A = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
# With lower casing
A = self.get_tokenizer(do_lower_case=__UpperCamelCase )
A = self.get_rust_tokenizer(do_lower_case=__UpperCamelCase )
A = 'UNwant\u00E9d,running'
A = tokenizer.tokenize(__UpperCamelCase )
A = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
A = tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
A = rust_tokenizer.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
A = self.get_rust_tokenizer()
A = tokenizer.encode(__UpperCamelCase )
A = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self : Tuple ) -> Optional[Any]:
A = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize('ah\u535A\u63A8zz' ) ,['ah', '\u535A', '\u63A8', 'zz'] )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
A = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['hello', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = BasicTokenizer(do_lower_case=__UpperCamelCase ,strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hällo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['h\u00E9llo'] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
A = BasicTokenizer(do_lower_case=__UpperCamelCase ,strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['hallo', '!', 'how', 'are', 'you', '?'] )
self.assertListEqual(tokenizer.tokenize('H\u00E9llo' ) ,['hello'] )
def _SCREAMING_SNAKE_CASE ( self : str ) -> Tuple:
A = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? ' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Dict:
A = BasicTokenizer(do_lower_case=__UpperCamelCase ,strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HäLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[Any]:
A = BasicTokenizer(do_lower_case=__UpperCamelCase ,strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(' \tHäLLo!how \n Are yoU? ' ) ,['HaLLo', '!', 'how', 'Are', 'yoU', '?'] )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> Any:
A = BasicTokenizer(do_lower_case=__UpperCamelCase ,never_split=['[UNK]'] )
self.assertListEqual(
tokenizer.tokenize(' \tHeLLo!how \n Are yoU? [UNK]' ) ,['HeLLo', '!', 'how', 'Are', 'yoU', '?', '[UNK]'] )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> str:
A = BasicTokenizer()
A = 'a\n\'ll !!to?\'d of, can\'t.'
A = ['a', '\'', 'll', '!', '!', 'to', '?', '\'', 'd', 'of', ',', 'can', '\'', 't', '.']
self.assertListEqual(tokenizer.tokenize(__UpperCamelCase ) ,__UpperCamelCase )
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
A = ['[UNK]', '[CLS]', '[SEP]', 'want', '##want', '##ed', 'wa', 'un', 'runn', '##ing']
A = {}
for i, token in enumerate(__UpperCamelCase ):
A = i
A = WordpieceTokenizer(vocab=__UpperCamelCase ,unk_token='[UNK]' )
self.assertListEqual(tokenizer.tokenize('' ) ,[] )
self.assertListEqual(tokenizer.tokenize('unwanted running' ) ,['un', '##want', '##ed', 'runn', '##ing'] )
self.assertListEqual(tokenizer.tokenize('unwantedX running' ) ,['[UNK]', 'runn', '##ing'] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> str:
self.assertTrue(_is_whitespace(' ' ) )
self.assertTrue(_is_whitespace('\t' ) )
self.assertTrue(_is_whitespace('\r' ) )
self.assertTrue(_is_whitespace('\n' ) )
self.assertTrue(_is_whitespace('\u00A0' ) )
self.assertFalse(_is_whitespace('A' ) )
self.assertFalse(_is_whitespace('-' ) )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Dict:
self.assertTrue(_is_control('\u0005' ) )
self.assertFalse(_is_control('A' ) )
self.assertFalse(_is_control(' ' ) )
self.assertFalse(_is_control('\t' ) )
self.assertFalse(_is_control('\r' ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> int:
self.assertTrue(_is_punctuation('-' ) )
self.assertTrue(_is_punctuation('$' ) )
self.assertTrue(_is_punctuation('`' ) )
self.assertTrue(_is_punctuation('.' ) )
self.assertFalse(_is_punctuation('A' ) )
self.assertFalse(_is_punctuation(' ' ) )
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Optional[int]:
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__UpperCamelCase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
self.assertListEqual(
[rust_tokenizer.tokenize(__UpperCamelCase ) for t in ['Test', '\xad', 'test']] ,[['[UNK]'], [], ['[UNK]']] )
@slow
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> int:
A = self.tokenizer_class.from_pretrained('bert-base-uncased' )
A = tokenizer.encode('sequence builders' ,add_special_tokens=__UpperCamelCase )
A = tokenizer.encode('multi-sequence build' ,add_special_tokens=__UpperCamelCase )
A = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
A = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase ,__UpperCamelCase )
assert encoded_sentence == [101] + text + [102]
assert encoded_pair == [101] + text + [102] + text_a + [102]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
A = F'A, naïve {tokenizer_r.mask_token} AllenNLP sentence.'
A = tokenizer_r.encode_plus(
__UpperCamelCase ,return_attention_mask=__UpperCamelCase ,return_token_type_ids=__UpperCamelCase ,return_offsets_mapping=__UpperCamelCase ,add_special_tokens=__UpperCamelCase ,)
A = tokenizer_r.do_lower_case if hasattr(__UpperCamelCase ,'do_lower_case' ) else False
A = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), 'A'),
((1, 2), ','),
((3, 5), 'na'),
((5, 6), '##ï'),
((6, 8), '##ve'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'Allen'),
((21, 23), '##NL'),
((23, 24), '##P'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), 'a'),
((1, 2), ','),
((3, 8), 'naive'),
((9, 15), tokenizer_r.mask_token),
((16, 21), 'allen'),
((21, 23), '##nl'),
((23, 24), '##p'),
((25, 33), 'sentence'),
((33, 34), '.'),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] ,tokenizer_r.convert_ids_to_tokens(tokens['input_ids'] ) )
self.assertEqual([e[0] for e in expected_results] ,tokens['offset_mapping'] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Tuple:
A = ['的', '人', '有']
A = ''.join(__UpperCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(F'{tokenizer.__class__.__name__} ({pretrained_name})' ):
A = True
A = self.tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
A = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
A = tokenizer_p.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
A = tokenizer_r.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
A = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase )
A = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
A = False
A = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
A = self.tokenizer_class.from_pretrained(__UpperCamelCase ,**__UpperCamelCase )
A = tokenizer_r.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
A = tokenizer_p.encode(__UpperCamelCase ,add_special_tokens=__UpperCamelCase )
A = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase )
A = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
A = [
F'##{token}' if idx != 0 else token for idx, token in enumerate(__UpperCamelCase )
]
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
self.assertListEqual(__UpperCamelCase ,__UpperCamelCase )
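# A minimal sketch of the greedy longest-match-first behaviour the wordpiece
# tests above exercise; the vocab is illustrative and the import matches the
# one at the top of this file.
from transformers.models.bert.tokenization_bert import WordpieceTokenizer

vocab = {"un": 0, "##want": 1, "##ed": 2, "[UNK]": 3}
wordpiece = WordpieceTokenizer(vocab=vocab, unk_token="[UNK]")
print(wordpiece.tokenize("unwanted"))   # ['un', '##want', '##ed']
print(wordpiece.tokenize("unwantedX"))  # ['[UNK]'], since the tail 'X' has no piece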
| 706 |
"""simple docstring"""
from collections import deque
from math import floor
from random import random
from time import time
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : Dict ) -> int:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Union[str, Any] ,A_ : Any ,A_ : Optional[Any]=1 ) -> int:
if self.graph.get(A_ ):
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
A = [[w, v]]
if not self.graph.get(A_ ):
A = []
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Union[str, Any] ,A_ : Dict ) -> Optional[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : int=-2 ,A_ : Dict=-1 ) -> str:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Any=-1 ) -> int:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Union[str, Any]=-2 ) -> Optional[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Tuple ) -> Any:
A = 0
for x in self.graph:
for y in self.graph[x]:
if y[1] == u:
count += 1
return count
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Union[str, Any] ) -> str:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Union[str, Any]=-2 ) -> Any:
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
A = []
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
sorted_nodes.append(stack.pop() )
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return sorted_nodes
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ,A_ : Tuple=-2 ,A_ : List[str]=-1 ) -> str:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : List[str] ,A_ : Union[str, Any]=-2 ) -> Dict:
A = time()
self.bfs(A_ )
A = time()
return end - begin
class lowerCAmelCase_ :
'''simple docstring'''
def __init__( self : List[Any] ) -> Tuple:
A = {}
def _SCREAMING_SNAKE_CASE ( self : Any ,A_ : Optional[Any] ,A_ : str ,A_ : List[str]=1 ) -> Dict:
        # check if the vertex u already exists
if self.graph.get(A_ ):
            # if there already is an edge
if self.graph[u].count([w, v] ) == 0:
self.graph[u].append([w, v] )
else:
# if u does not exist
A = [[w, v]]
# add the other way
if self.graph.get(A_ ):
            # if there already is an edge
if self.graph[v].count([w, u] ) == 0:
self.graph[v].append([w, u] )
else:
            # if v does not exist
A = [[w, u]]
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[Any] ,A_ : List[str] ) -> List[Any]:
if self.graph.get(A_ ):
for _ in self.graph[u]:
if _[1] == v:
self.graph[u].remove(A_ )
# the other way round
if self.graph.get(A_ ):
for _ in self.graph[v]:
if _[1] == u:
self.graph[v].remove(A_ )
def _SCREAMING_SNAKE_CASE ( self : List[Any] ,A_ : List[str]=-2 ,A_ : List[Any]=-1 ) -> int:
if s == d:
return []
A = []
A = []
if s == -2:
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = s
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
if node[1] == d:
visited.append(A_ )
return visited
else:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[int]=-1 ) -> List[Any]:
if c == -1:
A = floor(random() * 1_0000 ) + 10
for i in range(A_ ):
# every vertex has max 100 edges
for _ in range(floor(random() * 102 ) + 1 ):
A = floor(random() * c ) + 1
if n != i:
self.add_pair(A_ ,A_ ,1 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : Dict=-2 ) -> List[Any]:
A = deque()
A = []
if s == -2:
A = list(self.graph )[0]
d.append(A_ )
visited.append(A_ )
while d:
A = d.popleft()
if len(self.graph[s] ) != 0:
for node in self.graph[s]:
if visited.count(node[1] ) < 1:
d.append(node[1] )
visited.append(node[1] )
return visited
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ,A_ : Optional[Any] ) -> List[Any]:
return len(self.graph[u] )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> List[Any]:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack >= 0:
if stack[len_stack] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
anticipating_nodes.add(stack[len_stack] )
len_stack -= 1
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
            # check if we have reached the starting point
if len(A_ ) == 0:
return list(A_ )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ) -> Any:
A = []
A = []
A = list(self.graph )[0]
stack.append(A_ )
visited.append(A_ )
A = -2
A = []
A = s
A = False
A = set()
while True:
            # check if there are any non-isolated nodes
if len(self.graph[s] ) != 0:
A = s
for node in self.graph[s]:
if (
visited.count(node[1] ) > 0
and node[1] != parent
and indirect_parents.count(node[1] ) > 0
and not on_the_way_back
):
A = len(A_ ) - 1
while len_stack_minus_one >= 0:
if stack[len_stack_minus_one] == node[1]:
anticipating_nodes.add(node[1] )
break
else:
return True
if visited.count(node[1] ) < 1:
stack.append(node[1] )
visited.append(node[1] )
A = node[1]
break
# check if all the children are visited
if s == ss:
stack.pop()
A = True
if len(A_ ) != 0:
A = stack[len(A_ ) - 1]
else:
A = False
indirect_parents.append(A_ )
A = s
A = ss
# check if se have reached the starting point
if len(A_ ) == 0:
return False
def _SCREAMING_SNAKE_CASE ( self : Dict ) -> Any:
return list(self.graph )
def _SCREAMING_SNAKE_CASE ( self : Optional[int] ,A_ : Optional[Any]=-2 ,A_ : List[str]=-1 ) -> Any:
A = time()
self.dfs(A_ ,A_ )
A = time()
return end - begin
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,A_ : List[Any]=-2 ) -> Union[str, Any]:
A = time()
self.bfs(A_ )
A = time()
return end - begin | 22 | 0 |
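# A minimal usage sketch of the directed graph class above, written against
# the original method names its body calls (add_pair, dfs, bfs); `Graph` is a
# stand-in alias for the renamed class identifier.
graph = Graph()
graph.add_pair(0, 1)
graph.add_pair(1, 2)
graph.add_pair(2, 0)    # closes the cycle 0 -> 1 -> 2 -> 0
print(graph.dfs(0, 2))  # [0, 1, 2]
print(graph.bfs(0))     # [0, 1, 2]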
"""simple docstring"""
import os
# Precomputes a list of the first 100 triangular numbers
_lowercase = [int(0.5 * n * (n + 1)) for n in range(1, 1_01)]
def _snake_case ( ):
A = os.path.dirname(os.path.realpath(snake_case__ ) )
A = os.path.join(snake_case__ , 'words.txt' )
A = """"""
with open(snake_case__ ) as f:
A = f.readline()
A = [word.strip('\"' ) for word in words.strip('\r\n' ).split(',' )]
A = [
word
for word in [sum(ord(snake_case__ ) - 64 for x in word ) for word in words]
if word in TRIANGULAR_NUMBERS
]
return len(snake_case__ )
if __name__ == "__main__":
print(solution()) | 707 |
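# A worked example of the rule inside solution(): "SKY" scores
# 19 + 11 + 25 = 55, the 10th triangular number, so it counts as a
# triangular word.
triangular_numbers = [n * (n + 1) // 2 for n in range(1, 101)]
word_value = sum(ord(ch) - 64 for ch in "SKY")
print(word_value, word_value in triangular_numbers)  # 55 True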
"""simple docstring"""
from json import JSONDecodeError # Workaround for requests.exceptions.JSONDecodeError
import requests
def _snake_case ( snake_case__ : str = "isbn/0140328726" ):
A = olid.strip().strip('/' ) # Remove leading/trailing whitespace & slashes
if new_olid.count('/' ) != 1:
A = F'{olid} is not a valid Open Library olid'
raise ValueError(snake_case__ )
return requests.get(F'https://openlibrary.org/{new_olid}.json' ).json()
def _snake_case ( snake_case__ : dict ):
A = {
'title': 'Title',
'publish_date': 'Publish date',
'authors': 'Authors',
'number_of_pages': 'Number of pages:',
'first_sentence': 'First sentence',
'isbn_10': 'ISBN (10)',
'isbn_13': 'ISBN (13)',
}
A = {better_key: ol_book_data[key] for key, better_key in desired_keys.items()}
A = [
get_openlibrary_data(author['key'] )['name'] for author in data['Authors']
]
A = data['First sentence']['value']
for key, value in data.items():
if isinstance(snake_case__ , snake_case__ ):
A = ', '.join(snake_case__ )
return data
if __name__ == "__main__":
import doctest
doctest.testmod()
while True:
_lowercase = input('''\nEnter the ISBN code to search (or \'quit\' to stop): ''').strip()
if isbn.lower() in ("", "q", "quit", "exit", "stop"):
break
if len(isbn) not in (10, 13) or not isbn.isdigit():
print(F"""Sorry, {isbn} is not a valid ISBN. Please, input a valid ISBN.""")
continue
print(F"""\nSearching Open Library for ISBN: {isbn}...\n""")
try:
_lowercase = summarize_book(get_openlibrary_data(F"""isbn/{isbn}"""))
print('''\n'''.join(F"""{key}: {value}""" for key, value in book_summary.items()))
except JSONDecodeError: # Workaround for requests.exceptions.RequestException:
print(F"""Sorry, there are no results for ISBN: {isbn}.""") | 22 | 0 |
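# A one-shot sketch of the two helpers above (network access required). The
# names get_openlibrary_data / summarize_book are the ones the module body
# itself calls; both definitions above were collapsed to `_snake_case`.
book = summarize_book(get_openlibrary_data("isbn/0140328726"))
print(book["Title"])  # e.g. 'Fantastic Mr Fox'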
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers.models.clip.tokenization_clip import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import IMAGE_PROCESSOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import CLIPSegProcessor, ViTImageProcessor
@require_vision
class lowerCAmelCase_ ( unittest.TestCase ):
'''simple docstring'''
def _SCREAMING_SNAKE_CASE ( self : List[Any] ) -> int:
A = tempfile.mkdtemp()
# fmt: off
A = ['l', 'o', 'w', 'e', 'r', 's', 't', 'i', 'd', 'n', 'lo', 'l</w>', 'w</w>', 'r</w>', 't</w>', 'low</w>', 'er</w>', 'lowest</w>', 'newer</w>', 'wider', '<unk>', '<|startoftext|>', '<|endoftext|>']
# fmt: on
A = dict(zip(UpperCamelCase__ ,range(len(UpperCamelCase__ ) ) ) )
A = ['#version: 0.2', 'l o', 'lo w</w>', 'e r</w>', '']
A = {'unk_token': '<unk>'}
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['vocab_file'] )
A = os.path.join(self.tmpdirname ,VOCAB_FILES_NAMES['merges_file'] )
with open(self.vocab_file ,'w' ,encoding='utf-8' ) as fp:
fp.write(json.dumps(UpperCamelCase__ ) + '\n' )
with open(self.merges_file ,'w' ,encoding='utf-8' ) as fp:
fp.write('\n'.join(UpperCamelCase__ ) )
A = {
'do_resize': True,
'size': 20,
'do_center_crop': True,
'crop_size': 18,
'do_normalize': True,
'image_mean': [0.48_14_54_66, 0.4_57_82_75, 0.40_82_10_73],
'image_std': [0.26_86_29_54, 0.26_13_02_58, 0.27_57_77_11],
}
A = os.path.join(self.tmpdirname ,UpperCamelCase__ )
with open(self.image_processor_file ,'w' ,encoding='utf-8' ) as fp:
json.dump(UpperCamelCase__ ,UpperCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : str ,**A_ : List[Any] ) -> Optional[Any]:
return CLIPTokenizer.from_pretrained(self.tmpdirname ,**UpperCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Dict ,**A_ : Optional[Any] ) -> Optional[int]:
return CLIPTokenizerFast.from_pretrained(self.tmpdirname ,**UpperCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ,**A_ : Optional[Any] ) -> Any:
return ViTImageProcessor.from_pretrained(self.tmpdirname ,**UpperCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Tuple:
shutil.rmtree(self.tmpdirname )
def _SCREAMING_SNAKE_CASE ( self : int ) -> Optional[int]:
A = [np.random.randint(255 ,size=(3, 30, 400) ,dtype=np.uinta )]
A = [Image.fromarray(np.moveaxis(UpperCamelCase__ ,0 ,-1 ) ) for x in image_inputs]
return image_inputs
def _SCREAMING_SNAKE_CASE ( self : str ) -> str:
A = self.get_tokenizer()
A = self.get_rust_tokenizer()
A = self.get_image_processor()
A = CLIPSegProcessor(tokenizer=UpperCamelCase__ ,image_processor=UpperCamelCase__ )
processor_slow.save_pretrained(self.tmpdirname )
A = CLIPSegProcessor.from_pretrained(self.tmpdirname ,use_fast=UpperCamelCase__ )
A = CLIPSegProcessor(tokenizer=UpperCamelCase__ ,image_processor=UpperCamelCase__ )
processor_fast.save_pretrained(self.tmpdirname )
A = CLIPSegProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() ,tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() ,tokenizer_fast.get_vocab() )
self.assertIsInstance(processor_slow.tokenizer ,UpperCamelCase__ )
self.assertIsInstance(processor_fast.tokenizer ,UpperCamelCase__ )
self.assertEqual(processor_slow.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertEqual(processor_fast.image_processor.to_json_string() ,image_processor.to_json_string() )
self.assertIsInstance(processor_slow.image_processor ,UpperCamelCase__ )
self.assertIsInstance(processor_fast.image_processor ,UpperCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : List[str] ) -> Optional[int]:
A = CLIPSegProcessor(tokenizer=self.get_tokenizer() ,image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
A = self.get_tokenizer(bos_token='(BOS)' ,eos_token='(EOS)' )
A = self.get_image_processor(do_normalize=UpperCamelCase__ ,padding_value=1.0 )
A = CLIPSegProcessor.from_pretrained(
self.tmpdirname ,bos_token='(BOS)' ,eos_token='(EOS)' ,do_normalize=UpperCamelCase__ ,padding_value=1.0 )
self.assertEqual(processor.tokenizer.get_vocab() ,tokenizer_add_kwargs.get_vocab() )
self.assertIsInstance(processor.tokenizer ,UpperCamelCase__ )
self.assertEqual(processor.image_processor.to_json_string() ,image_processor_add_kwargs.to_json_string() )
self.assertIsInstance(processor.image_processor ,UpperCamelCase__ )
def _SCREAMING_SNAKE_CASE ( self : int ) -> str:
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPSegProcessor(tokenizer=UpperCamelCase__ ,image_processor=UpperCamelCase__ )
A = self.prepare_image_inputs()
A = image_processor(UpperCamelCase__ ,return_tensors='np' )
A = processor(images=UpperCamelCase__ ,return_tensors='np' )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() ,input_processor[key].sum() ,delta=1e-2 )
def _SCREAMING_SNAKE_CASE ( self : Optional[Any] ) -> Optional[int]:
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPSegProcessor(tokenizer=UpperCamelCase__ ,image_processor=UpperCamelCase__ )
A = 'lower newer'
A = processor(text=UpperCamelCase__ )
A = tokenizer(UpperCamelCase__ )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] ,encoded_processor[key] )
def _SCREAMING_SNAKE_CASE ( self : Any ) -> Any:
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPSegProcessor(tokenizer=UpperCamelCase__ ,image_processor=UpperCamelCase__ )
A = 'lower newer'
A = self.prepare_image_inputs()
A = processor(text=UpperCamelCase__ ,images=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) ,['input_ids', 'attention_mask', 'pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def _SCREAMING_SNAKE_CASE ( self : Any ) -> List[str]:
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPSegProcessor(tokenizer=UpperCamelCase__ ,image_processor=UpperCamelCase__ )
A = self.prepare_image_inputs()
A = self.prepare_image_inputs()
A = processor(images=UpperCamelCase__ ,visual_prompt=UpperCamelCase__ )
self.assertListEqual(list(inputs.keys() ) ,['pixel_values', 'conditional_pixel_values'] )
# test if it raises when no input is passed
with pytest.raises(UpperCamelCase__ ):
processor()
def _SCREAMING_SNAKE_CASE ( self : Union[str, Any] ) -> List[Any]:
A = self.get_image_processor()
A = self.get_tokenizer()
A = CLIPSegProcessor(tokenizer=UpperCamelCase__ ,image_processor=UpperCamelCase__ )
A = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
A = processor.batch_decode(UpperCamelCase__ )
A = tokenizer.batch_decode(UpperCamelCase__ )
self.assertListEqual(UpperCamelCase__ ,UpperCamelCase__ ) | 708 |
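# A minimal sketch of the joint text+image call the tests above cover.
# The public CLIPSeg checkpoint is an assumption; any CLIPSegProcessor repo
# works the same way.
import numpy as np
from PIL import Image
from transformers import CLIPSegProcessor

processor = CLIPSegProcessor.from_pretrained("CIDAS/clipseg-rd64-refined")
image = Image.fromarray(np.zeros((224, 224, 3), dtype=np.uint8))
inputs = processor(text=["a cat"], images=image, return_tensors="pt")
print(sorted(inputs.keys()))  # ['attention_mask', 'input_ids', 'pixel_values']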
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_lowercase = {
'''configuration_perceiver''': ['''PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PerceiverConfig''', '''PerceiverOnnxConfig'''],
'''tokenization_perceiver''': ['''PerceiverTokenizer'''],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = ['''PerceiverFeatureExtractor''']
_lowercase = ['''PerceiverImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowercase = [
'''PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PerceiverForImageClassificationConvProcessing''',
'''PerceiverForImageClassificationFourier''',
'''PerceiverForImageClassificationLearned''',
'''PerceiverForMaskedLM''',
'''PerceiverForMultimodalAutoencoding''',
'''PerceiverForOpticalFlow''',
'''PerceiverForSequenceClassification''',
'''PerceiverLayer''',
'''PerceiverModel''',
'''PerceiverPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_perceiver import PERCEIVER_PRETRAINED_CONFIG_ARCHIVE_MAP, PerceiverConfig, PerceiverOnnxConfig
from .tokenization_perceiver import PerceiverTokenizer
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_perceiver import PerceiverFeatureExtractor
from .image_processing_perceiver import PerceiverImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_perceiver import (
PERCEIVER_PRETRAINED_MODEL_ARCHIVE_LIST,
PerceiverForImageClassificationConvProcessing,
PerceiverForImageClassificationFourier,
PerceiverForImageClassificationLearned,
PerceiverForMaskedLM,
PerceiverForMultimodalAutoencoding,
PerceiverForOpticalFlow,
PerceiverForSequenceClassification,
PerceiverLayer,
PerceiverModel,
PerceiverPreTrainedModel,
)
else:
import sys
_lowercase = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 22 | 0 |
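# A minimal sketch of what the _LazyModule pattern above buys: importing the
# package is cheap, and the torch-backed submodule is resolved on first
# attribute access (config values below are illustrative).
from transformers import PerceiverConfig, PerceiverModel  # triggers lazy resolution

config = PerceiverConfig(num_latents=16, d_latents=32, d_model=64)
model = PerceiverModel(config)  # constructed without any pre/postprocessors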
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def _snake_case ( snake_case__ : str , snake_case__ : Union[str, Any] , snake_case__ : Any , snake_case__ : int , snake_case__ : Dict=True , snake_case__ : List[Any]="pt" ):
A = {'add_prefix_space': True} if isinstance(snake_case__ , snake_case__ ) and not line.startswith(' ' ) else {}
A = padding_side
return tokenizer(
[line] , max_length=snake_case__ , padding='max_length' if pad_to_max_length else None , truncation=snake_case__ , return_tensors=snake_case__ , add_special_tokens=snake_case__ , **snake_case__ , )
def _snake_case ( snake_case__ : Any , snake_case__ : Tuple , snake_case__ : List[str]=None , ):
A = input_ids.ne(snake_case__ ).any(dim=0 )
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
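# A worked example of trim_batch (shown as a comment): columns that equal the
# pad token (here 0) in every row are dropped, shortening the batch.
#   ids = torch.tensor([[5, 6, 0, 0],
#                       [7, 0, 0, 0]])
#   trim_batch(ids, 0)  ->  tensor([[5, 6],
#                                   [7, 0]])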
class Seq2SeqDataset(Dataset):
    """A dataset that reads line-aligned `.source` / `.target` files for seq2seq training."""

    def __init__(self, tokenizer, data_dir, max_source_length, max_target_length, type_path="train", n_obs=None, src_lang=None, tgt_lang=None, prefix=""):
        super().__init__()
        self.src_file = Path(data_dir).joinpath(type_path + ".source")
        self.tgt_file = Path(data_dir).joinpath(type_path + ".target")
        self.src_lens = self.get_char_lens(self.src_file)
        self.max_source_length = max_source_length
        self.max_target_length = max_target_length
        assert min(self.src_lens) > 0, f"found empty line in {self.src_file}"
        self.tokenizer = tokenizer
        self.prefix = prefix
        if n_obs is not None:
            self.src_lens = self.src_lens[:n_obs]
        self.src_lang = src_lang
        self.tgt_lang = tgt_lang

    def __len__(self):
        return len(self.src_lens)

    def __getitem__(self, index) -> Dict[str, torch.Tensor]:
        index = index + 1  # linecache starts at 1
        source_line = self.prefix + linecache.getline(str(self.src_file), index).rstrip("\n")
        tgt_line = linecache.getline(str(self.tgt_file), index).rstrip("\n")
        assert source_line, f"empty source line for index {index}"
        assert tgt_line, f"empty tgt line for index {index}"

        # Need to add eos token manually for T5
        if isinstance(self.tokenizer, T5Tokenizer):
            source_line += self.tokenizer.eos_token
            tgt_line += self.tokenizer.eos_token

        # Pad source and target to the right
        source_tokenizer = (
            self.tokenizer.question_encoder if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer
        )
        target_tokenizer = self.tokenizer.generator if isinstance(self.tokenizer, RagTokenizer) else self.tokenizer

        source_inputs = encode_line(source_tokenizer, source_line, self.max_source_length, "right")
        target_inputs = encode_line(target_tokenizer, tgt_line, self.max_target_length, "right")

        source_ids = source_inputs["input_ids"].squeeze()
        target_ids = target_inputs["input_ids"].squeeze()
        src_mask = source_inputs["attention_mask"].squeeze()
        return {
            "input_ids": source_ids,
            "attention_mask": src_mask,
            "decoder_input_ids": target_ids,
        }

    @staticmethod
    def get_char_lens(data_file):
        return [len(x) for x in Path(data_file).open().readlines()]

    def collate_fn(self, batch) -> Dict[str, torch.Tensor]:
        input_ids = torch.stack([x["input_ids"] for x in batch])
        masks = torch.stack([x["attention_mask"] for x in batch])
        target_ids = torch.stack([x["decoder_input_ids"] for x in batch])
        tgt_pad_token_id = (
            self.tokenizer.generator.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        src_pad_token_id = (
            self.tokenizer.question_encoder.pad_token_id
            if isinstance(self.tokenizer, RagTokenizer)
            else self.tokenizer.pad_token_id
        )
        y = trim_batch(target_ids, tgt_pad_token_id)
        source_ids, source_mask = trim_batch(input_ids, src_pad_token_id, attention_mask=masks)
        batch = {
            "input_ids": source_ids,
            "attention_mask": source_mask,
            "decoder_input_ids": y,
        }
        return batch
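# Illustrative usage (not part of the original module), given a `tokenizer` and
# a `data` directory containing train.source / train.target:
#
#   from torch.utils.data import DataLoader
#   ds = Seq2SeqDataset(tokenizer, "data", max_source_length=128, max_target_length=32)
#   loader = DataLoader(ds, batch_size=8, collate_fn=ds.collate_fn)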
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))


def save_git_info(folder_path: str) -> None:
    """Save git information to folder_path/git_log.json."""
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, "git_log.json"))


def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, "w") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)


def load_json(path):
    with open(path) as f:
        return json.load(f)


def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        "repo_id": str(repo),
        "repo_sha": str(repo.head.object.hexsha),
        "repo_branch": str(repo.active_branch),
        "hostname": str(socket.gethostname()),
    }
    return repo_infos


def lmap(f: Callable, x: Iterable) -> List:
    """list(map(f, x))"""
    return list(map(f, x))


def pickle_save(obj, path):
    """pickle.dump(obj, path)"""
    with open(path, "wb") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    """Lower text and remove punctuation, articles and extra whitespace."""

    def remove_articles(text):
        return re.sub(r"\b(a|an|the)\b", " ", text)

    def white_space_fix(text):
        return " ".join(text.split())

    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)

    def lower(text):
        return text.lower()

    return white_space_fix(remove_articles(remove_punc(lower(s))))


def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
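# Illustrative example (not part of the original module): "the cat sat" vs.
# "cat sat down". normalize_answer drops the article "the", leaving tokens
# ['cat', 'sat'] and ['cat', 'sat', 'down']: overlap 2, precision 1.0,
# recall 2/3, so:
#
#   f1_score("the cat sat", "cat sat down")  # -> 0.8 (up to float rounding)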
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)


def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> Dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}


def is_rag_model(model_prefix):
    return model_prefix.startswith("rag")


def set_extra_model_params(extra_params, hparams, config):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have `dropout` param, they have `dropout_rate` instead
    equivalent_param["dropout"] = "dropout_rate"
    for p in extra_params:
        if getattr(hparams, p, None):
            if not hasattr(config, p) and not hasattr(config, equivalent_param[p]):
                logger.info("config doesn't have a `{}` attribute".format(p))
                delattr(hparams, p)
                continue
            set_p = p if hasattr(config, p) else equivalent_param[p]
            setattr(config, set_p, getattr(hparams, p))
            delattr(hparams, p)
    return hparams, config
"""simple docstring"""
import argparse
import json
from pathlib import Path
import requests
import timm
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import AutoImageProcessor, SwinvaConfig as Swinv2Config, SwinvaForImageClassification as Swinv2ForImageClassification  # noqa: E501 -- actual names below
from transformers import Swinv2Config, Swinv2ForImageClassification
def get_swinv2_config(swinv2_name):
    config = Swinv2Config()
    name_split = swinv2_name.split("_")

    model_size = name_split[1]
    if "to" in name_split[3]:
        img_size = int(name_split[3][-3:])
    else:
        img_size = int(name_split[3])
    if "to" in name_split[2]:
        window_size = int(name_split[2][-2:])
    else:
        window_size = int(name_split[2][6:])

    if model_size == "tiny":
        embed_dim = 96
        depths = (2, 2, 6, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "small":
        embed_dim = 96
        depths = (2, 2, 18, 2)
        num_heads = (3, 6, 12, 24)
    elif model_size == "base":
        embed_dim = 128
        depths = (2, 2, 18, 2)
        num_heads = (4, 8, 16, 32)
    else:
        embed_dim = 192
        depths = (2, 2, 18, 2)
        num_heads = (6, 12, 24, 48)

    if "to" in swinv2_name:
        config.pretrained_window_sizes = (12, 12, 12, 6)

    if ("22k" in swinv2_name) and ("to" not in swinv2_name):
        num_classes = 21841
        repo_id = "huggingface/label-files"
        filename = "imagenet-22k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}
    else:
        num_classes = 1000
        repo_id = "huggingface/label-files"
        filename = "imagenet-1k-id2label.json"
        id2label = json.load(open(hf_hub_download(repo_id, filename, repo_type="dataset"), "r"))
        id2label = {int(k): v for k, v in id2label.items()}
        config.id2label = id2label
        config.label2id = {v: k for k, v in id2label.items()}

    config.image_size = img_size
    config.num_labels = num_classes
    config.embed_dim = embed_dim
    config.depths = depths
    config.num_heads = num_heads
    config.window_size = window_size

    return config
def rename_key(name):
    if "patch_embed.proj" in name:
        name = name.replace("patch_embed.proj", "embeddings.patch_embeddings.projection")
    if "patch_embed.norm" in name:
        name = name.replace("patch_embed.norm", "embeddings.norm")
    if "layers" in name:
        name = "encoder." + name
    if "attn.proj" in name:
        name = name.replace("attn.proj", "attention.output.dense")
    if "attn" in name:
        name = name.replace("attn", "attention.self")
    if "norm1" in name:
        name = name.replace("norm1", "layernorm_before")
    if "norm2" in name:
        name = name.replace("norm2", "layernorm_after")
    if "mlp.fc1" in name:
        name = name.replace("mlp.fc1", "intermediate.dense")
    if "mlp.fc2" in name:
        name = name.replace("mlp.fc2", "output.dense")
    if "q_bias" in name:
        name = name.replace("q_bias", "query.bias")
    if "k_bias" in name:
        name = name.replace("k_bias", "key.bias")
    if "v_bias" in name:
        name = name.replace("v_bias", "value.bias")
    if "cpb_mlp" in name:
        name = name.replace("cpb_mlp", "continuous_position_bias_mlp")
    if name == "norm.weight":
        name = "layernorm.weight"
    if name == "norm.bias":
        name = "layernorm.bias"
    if "head" in name:
        name = name.replace("head", "classifier")
    else:
        name = "swinv2." + name
    return name
def convert_state_dict(orig_state_dict, model):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "mask" in key:
            continue
        elif "qkv" in key:
            key_split = key.split(".")
            layer_num = int(key_split[1])
            block_num = int(key_split[3])
            dim = model.swinv2.encoder.layers[layer_num].blocks[block_num].attention.self.all_head_size

            if "weight" in key:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.weight"] = val[:dim, :]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.weight"] = val[dim : dim * 2, :]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.weight"] = val[-dim:, :]
            else:
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.query.bias"] = val[:dim]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.key.bias"] = val[dim : dim * 2]
                orig_state_dict[f"swinv2.encoder.layers.{layer_num}.blocks.{block_num}.attention.self.value.bias"] = val[-dim:]
        else:
            orig_state_dict[rename_key(key)] = val

    return orig_state_dict
def convert_swinv2_checkpoint(swinv2_name, pytorch_dump_folder_path):
    timm_model = timm.create_model(swinv2_name, pretrained=True)
    timm_model.eval()

    config = get_swinv2_config(swinv2_name)
    model = Swinv2ForImageClassification(config)
    model.eval()

    new_state_dict = convert_state_dict(timm_model.state_dict(), model)
    model.load_state_dict(new_state_dict)

    url = "http://images.cocodataset.org/val2017/000000039769.jpg"

    image_processor = AutoImageProcessor.from_pretrained("microsoft/{}".format(swinv2_name.replace("_", "-")))
    image = Image.open(requests.get(url, stream=True).raw)
    inputs = image_processor(images=image, return_tensors="pt")

    timm_outs = timm_model(inputs["pixel_values"])
    hf_outs = model(**inputs).logits

    assert torch.allclose(timm_outs, hf_outs, atol=1e-3)

    print(f"Saving model {swinv2_name} to {pytorch_dump_folder_path}")
    model.save_pretrained(pytorch_dump_folder_path)

    print(f"Saving image processor to {pytorch_dump_folder_path}")
    image_processor.save_pretrained(pytorch_dump_folder_path)

    model.push_to_hub(
        repo_path_or_name=Path(pytorch_dump_folder_path, swinv2_name), organization="nandwalritik", commit_message="Add model", )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--swinv2_name''',
default='''swinv2_tiny_patch4_window8_256''',
type=str,
help='''Name of the Swinv2 timm model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model directory.'''
)
    args = parser.parse_args()
    convert_swinv2_checkpoint(args.swinv2_name, args.pytorch_dump_folder_path)
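# Illustrative invocation (not part of the original script), assuming the file
# is saved as convert_swinv2_timm_to_pytorch.py:
#
#   python convert_swinv2_timm_to_pytorch.py \
#       --swinv2_name swinv2_tiny_patch4_window8_256 \
#       --pytorch_dump_folder_path ./swinv2_tiny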
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

VIVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class VivitConfig(PretrainedConfig):
    """Configuration class to store the configuration of a ViViT model."""

    model_type = "vivit"

    def __init__(self, image_size=224, num_frames=32, tubelet_size=[2, 16, 16], num_channels=3, hidden_size=768, num_hidden_layers=12, num_attention_heads=12, intermediate_size=3072, hidden_act="gelu_fast", hidden_dropout_prob=0.0, attention_probs_dropout_prob=0.0, initializer_range=0.02, layer_norm_eps=1e-06, qkv_bias=True, **kwargs):
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps

        self.image_size = image_size
        self.num_frames = num_frames
        self.tubelet_size = tubelet_size
        self.num_channels = num_channels
        self.qkv_bias = qkv_bias
        super().__init__(**kwargs)
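# Illustrative usage (not part of the original file):
#
#   from transformers import VivitConfig, VivitModel
#   config = VivitConfig(num_frames=32, image_size=224)
#   model = VivitModel(config)  # randomly initialized from the config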
"""simple docstring"""
from math import pi, sqrt
def gamma(num: float) -> float:
    """Compute Gamma(num) recursively for positive integer or half-integer num."""
    if num <= 0:
        raise ValueError("math domain error")
    if num > 171.5:
        raise OverflowError("math range error")
    elif num - int(num) not in (0, 0.5):
        raise NotImplementedError("num must be an integer or a half-integer")
    elif num == 0.5:
        return sqrt(pi)
    else:
        return 1.0 if num == 1 else (num - 1) * gamma(num - 1)


def test_gamma() -> None:
    assert gamma(0.5) == sqrt(pi)
    assert gamma(1) == 1.0
    assert gamma(2) == 1.0
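# Illustrative examples (not in the original file), using Gamma(n) = (n - 1)!:
#
#   gamma(5)    # -> 24.0 (i.e. 4!)
#   gamma(2.5)  # -> 1.5 * 0.5 * sqrt(pi) ~= 1.3293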
if __name__ == "__main__":
from doctest import testmod
testmod()
    num = 1.0
    while num:
        num = float(input("Gamma of: "))
        print(f"gamma({num}) = {gamma(num)}")
        print("\nEnter 0 to exit...")
"""simple docstring"""
import importlib.util
import os
import platform
from argparse import ArgumentParser
import huggingface_hub
from .. import __version__ as version
from ..utils import (
is_accelerate_available,
is_flax_available,
is_safetensors_available,
is_tf_available,
is_torch_available,
)
from . import BaseTransformersCLICommand
def info_command_factory(_):
    return EnvironmentCommand()


def download_command_factory(args):
    return EnvironmentCommand(args.accelerate_config_file)
class EnvironmentCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        download_parser = parser.add_parser("env")
        download_parser.set_defaults(func=info_command_factory)
        download_parser.add_argument(
            "--accelerate-config_file", default=None, help="The accelerate config file to use for the default values in the launching script.", )
        download_parser.set_defaults(func=download_command_factory)

    def __init__(self, accelerate_config_file, *args):
        self._accelerate_config_file = accelerate_config_file
    def run(self):
        safetensors_version = "not installed"
        if is_safetensors_available():
            import safetensors

            safetensors_version = safetensors.__version__
        elif importlib.util.find_spec("safetensors") is not None:
            import safetensors

            safetensors_version = f"{safetensors.__version__} but is ignored because of PyTorch version too old."

        accelerate_version = "not installed"
        accelerate_config = accelerate_config_str = "not found"
        if is_accelerate_available():
            import accelerate
            from accelerate.commands.config import default_config_file, load_config_from_file

            accelerate_version = accelerate.__version__
            # Get the default from the config file.
            if self._accelerate_config_file is not None or os.path.isfile(default_config_file):
                accelerate_config = load_config_from_file(self._accelerate_config_file).to_dict()

            accelerate_config_str = (
                "\n".join([f"\t- {prop}: {val}" for prop, val in accelerate_config.items()])
                if isinstance(accelerate_config, dict)
                else f"\t{accelerate_config}"
            )

        pt_version = "not installed"
        pt_cuda_available = "NA"
        if is_torch_available():
            import torch

            pt_version = torch.__version__
            pt_cuda_available = torch.cuda.is_available()

        tf_version = "not installed"
        tf_cuda_available = "NA"
        if is_tf_available():
            import tensorflow as tf

            tf_version = tf.__version__
            try:
                # deprecated in v2.1
                tf_cuda_available = tf.test.is_gpu_available()
            except AttributeError:
                # returns list of devices, convert to bool
                tf_cuda_available = bool(tf.config.list_physical_devices("GPU"))

        flax_version = "not installed"
        jax_version = "not installed"
        jaxlib_version = "not installed"
        jax_backend = "NA"
        if is_flax_available():
            import flax
            import jax
            import jaxlib

            flax_version = flax.__version__
            jax_version = jax.__version__
            jaxlib_version = jaxlib.__version__
            jax_backend = jax.lib.xla_bridge.get_backend().platform

        info = {
            "`transformers` version": version,
            "Platform": platform.platform(),
            "Python version": platform.python_version(),
            "Huggingface_hub version": huggingface_hub.__version__,
            "Safetensors version": f"{safetensors_version}",
            "Accelerate version": f"{accelerate_version}",
            "Accelerate config": f"{accelerate_config_str}",
            "PyTorch version (GPU?)": f"{pt_version} ({pt_cuda_available})",
            "Tensorflow version (GPU?)": f"{tf_version} ({tf_cuda_available})",
            "Flax version (CPU?/GPU?/TPU?)": f"{flax_version} ({jax_backend})",
            "Jax version": f"{jax_version}",
            "JaxLib version": f"{jaxlib_version}",
            "Using GPU in script?": "<fill in>",
            "Using distributed or parallel set-up in script?": "<fill in>",
        }

        print("\nCopy-and-paste the text below in your GitHub issue and FILL OUT the two last points.\n")
        print(self.format_dict(info))

        return info

    @staticmethod
    def format_dict(d):
        return "\n".join([f"- {prop}: {val}" for prop, val in d.items()]) + "\n"
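# Illustrative usage (not part of the original file): once registered with the
# transformers CLI, this command is invoked from a shell as:
#
#   transformers-cli env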
"""simple docstring"""
from dataclasses import dataclass
from typing import Optional
import numpy as np
import torch
import torch.nn as nn
from ..utils import BaseOutput, is_torch_version, randn_tensor
from .attention_processor import SpatialNorm
from .unet_2d_blocks import UNetMidBlock2D, get_down_block, get_up_block
@dataclass
class DecoderOutput(BaseOutput):
    """Output of decoding method, carrying the decoded sample."""

    sample: torch.FloatTensor
class Encoder(nn.Module):
    """Convolutional encoder that maps images to (optionally doubled) latent channels."""

    def __init__(self, in_channels=3, out_channels=3, down_block_types=("DownEncoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", double_z=True):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = torch.nn.Conv2d(in_channels, block_out_channels[0], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.down_blocks = nn.ModuleList([])

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(down_block_type, num_layers=self.layers_per_block, in_channels=input_channel, out_channels=output_channel, add_downsample=not is_final_block, resnet_eps=1e-6, downsample_padding=0, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=None)
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift='default', attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=None)

        # out
        self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[-1], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()

        conv_out_channels = 2 * out_channels if double_z else out_channels
        self.conv_out = nn.Conv2d(block_out_channels[-1], conv_out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, x):
        sample = x
        sample = self.conv_in(sample)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            # down
            if is_torch_version(">=", "1.11.0"):
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample, use_reentrant=False)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, use_reentrant=False)
            else:
                for down_block in self.down_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(down_block), sample)
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample)
        else:
            # down
            for down_block in self.down_blocks:
                sample = down_block(sample)
            # middle
            sample = self.mid_block(sample)

        # post-process
        sample = self.conv_norm_out(sample)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
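# Illustrative example (not in the original file): with the defaults -- one
# down block without downsampling and double_z=True -- the encoder maps
# (B, 3, H, W) to (B, 6, H, W) (mean + logvar channels):
#
#   enc = Encoder()
#   out = enc(torch.randn(1, 3, 64, 64))  # -> torch.Size([1, 6, 64, 64])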
class Decoder(nn.Module):
    """Convolutional decoder that maps latents back to images."""

    def __init__(self, in_channels=3, out_channels=3, up_block_types=("UpDecoderBlock2D",), block_out_channels=(64,), layers_per_block=2, norm_num_groups=32, act_fn="silu", norm_type="group"):
        super().__init__()
        self.layers_per_block = layers_per_block

        self.conv_in = nn.Conv2d(in_channels, block_out_channels[-1], kernel_size=3, stride=1, padding=1)

        self.mid_block = None
        self.up_blocks = nn.ModuleList([])

        temb_channels = in_channels if norm_type == "spatial" else None

        # mid
        self.mid_block = UNetMidBlock2D(in_channels=block_out_channels[-1], resnet_eps=1e-6, resnet_act_fn=act_fn, output_scale_factor=1, resnet_time_scale_shift='default' if norm_type == "group" else norm_type, attention_head_dim=block_out_channels[-1], resnet_groups=norm_num_groups, temb_channels=temb_channels)

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            up_block = get_up_block(up_block_type, num_layers=self.layers_per_block + 1, in_channels=prev_output_channel, out_channels=output_channel, prev_output_channel=None, add_upsample=not is_final_block, resnet_eps=1e-6, resnet_act_fn=act_fn, resnet_groups=norm_num_groups, attention_head_dim=output_channel, temb_channels=temb_channels, resnet_time_scale_shift=norm_type)
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_type == "spatial":
            self.conv_norm_out = SpatialNorm(block_out_channels[0], temb_channels)
        else:
            self.conv_norm_out = nn.GroupNorm(num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=1e-6)
        self.conv_act = nn.SiLU()
        self.conv_out = nn.Conv2d(block_out_channels[0], out_channels, 3, padding=1)

        self.gradient_checkpointing = False

    def forward(self, z, latent_embeds=None):
        sample = z
        sample = self.conv_in(sample)

        upscale_dtype = next(iter(self.up_blocks.parameters())).dtype
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module):
                def custom_forward(*inputs):
                    return module(*inputs)

                return custom_forward

            if is_torch_version(">=", "1.11.0"):
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds, use_reentrant=False)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds, use_reentrant=False)
            else:
                # middle
                sample = torch.utils.checkpoint.checkpoint(create_custom_forward(self.mid_block), sample, latent_embeds)
                sample = sample.to(upscale_dtype)

                # up
                for up_block in self.up_blocks:
                    sample = torch.utils.checkpoint.checkpoint(create_custom_forward(up_block), sample, latent_embeds)
        else:
            # middle
            sample = self.mid_block(sample, latent_embeds)
            sample = sample.to(upscale_dtype)

            # up
            for up_block in self.up_blocks:
                sample = up_block(sample, latent_embeds)

        # post-process
        if latent_embeds is None:
            sample = self.conv_norm_out(sample)
        else:
            sample = self.conv_norm_out(sample, latent_embeds)
        sample = self.conv_act(sample)
        sample = self.conv_out(sample)

        return sample
class VectorQuantizer(nn.Module):
    """Discretization bottleneck of a VQ-VAE, with optional index remapping."""

    def __init__(self, n_e, vq_embed_dim, beta, remap=None, unknown_index="random", sane_index_shape=False, legacy=True):
        super().__init__()
        self.n_e = n_e
        self.vq_embed_dim = vq_embed_dim
        self.beta = beta
        self.legacy = legacy

        self.embedding = nn.Embedding(self.n_e, self.vq_embed_dim)
        self.embedding.weight.data.uniform_(-1.0 / self.n_e, 1.0 / self.n_e)

        self.remap = remap
        if self.remap is not None:
            self.register_buffer('used', torch.tensor(np.load(self.remap)))
            self.re_embed = self.used.shape[0]
            self.unknown_index = unknown_index  # "random" or "extra" or integer
            if self.unknown_index == "extra":
                self.unknown_index = self.re_embed
                self.re_embed = self.re_embed + 1
            print(
                f"Remapping {self.n_e} indices to {self.re_embed} indices. "
                f"Using {self.unknown_index} for unknown indices.")
        else:
            self.re_embed = n_e

        self.sane_index_shape = sane_index_shape

    def remap_to_used(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        match = (inds[:, :, None] == used[None, None, ...]).long()
        new = match.argmax(-1)
        unknown = match.sum(2) < 1
        if self.unknown_index == "random":
            new[unknown] = torch.randint(0, self.re_embed, size=new[unknown].shape).to(device=new.device)
        else:
            new[unknown] = self.unknown_index
        return new.reshape(ishape)

    def unmap_to_all(self, inds):
        ishape = inds.shape
        assert len(ishape) > 1
        inds = inds.reshape(ishape[0], -1)
        used = self.used.to(inds)
        if self.re_embed > self.used.shape[0]:  # extra token
            inds[inds >= self.used.shape[0]] = 0  # simply set to zero
        back = torch.gather(used[None, :][inds.shape[0] * [0], :], 1, inds)
        return back.reshape(ishape)

    def forward(self, z):
        # reshape z -> (batch, height, width, channel) and flatten
        z = z.permute(0, 2, 3, 1).contiguous()
        z_flattened = z.view(-1, self.vq_embed_dim)

        # distances from z to embeddings e_j (z - e)^2 = z^2 + e^2 - 2 e * z
        min_encoding_indices = torch.argmin(torch.cdist(z_flattened, self.embedding.weight), dim=1)

        z_q = self.embedding(min_encoding_indices).view(z.shape)
        perplexity = None
        min_encodings = None

        # compute loss for embedding
        if not self.legacy:
            loss = self.beta * torch.mean((z_q.detach() - z) ** 2) + torch.mean((z_q - z.detach()) ** 2)
        else:
            loss = torch.mean((z_q.detach() - z) ** 2) + self.beta * torch.mean((z_q - z.detach()) ** 2)

        # preserve gradients
        z_q = z + (z_q - z).detach()

        # reshape back to match original input shape
        z_q = z_q.permute(0, 3, 1, 2).contiguous()

        if self.remap is not None:
            min_encoding_indices = min_encoding_indices.reshape(z.shape[0], -1)  # add batch axis
            min_encoding_indices = self.remap_to_used(min_encoding_indices)
            min_encoding_indices = min_encoding_indices.reshape(-1, 1)  # flatten

        if self.sane_index_shape:
            min_encoding_indices = min_encoding_indices.reshape(z_q.shape[0], z_q.shape[2], z_q.shape[3])

        return z_q, loss, (perplexity, min_encodings, min_encoding_indices)

    def get_codebook_entry(self, indices, shape):
        # shape specifying (batch, height, width, channel)
        if self.remap is not None:
            indices = indices.reshape(shape[0], -1)  # add batch axis
            indices = self.unmap_to_all(indices)
            indices = indices.reshape(-1)  # flatten again

        # get quantized latent vectors
        z_q = self.embedding(indices)

        if shape is not None:
            z_q = z_q.view(shape)
            # reshape back to match original input shape
            z_q = z_q.permute(0, 3, 1, 2).contiguous()

        return z_q
class DiagonalGaussianDistribution(object):
    """Diagonal Gaussian parameterized by mean and log-variance packed along dim 1."""

    def __init__(self, parameters, deterministic=False):
        self.parameters = parameters
        self.mean, self.logvar = torch.chunk(parameters, 2, dim=1)
        self.logvar = torch.clamp(self.logvar, -30.0, 20.0)
        self.deterministic = deterministic
        self.std = torch.exp(0.5 * self.logvar)
        self.var = torch.exp(self.logvar)
        if self.deterministic:
            self.var = self.std = torch.zeros_like(
                self.mean, device=self.parameters.device, dtype=self.parameters.dtype)

    def sample(self, generator: Optional[torch.Generator] = None) -> torch.FloatTensor:
        # make sure sample is on the same device as the parameters and has same dtype
        sample = randn_tensor(
            self.mean.shape, generator=generator, device=self.parameters.device, dtype=self.parameters.dtype)
        x = self.mean + self.std * sample
        return x

    def kl(self, other=None):
        if self.deterministic:
            return torch.Tensor([0.0])
        else:
            if other is None:
                return 0.5 * torch.sum(torch.pow(self.mean, 2) + self.var - 1.0 - self.logvar, dim=[1, 2, 3])
            else:
                return 0.5 * torch.sum(
                    torch.pow(self.mean - other.mean, 2) / other.var
                    + self.var / other.var
                    - 1.0
                    - self.logvar
                    + other.logvar, dim=[1, 2, 3], )

    def nll(self, sample, dims=[1, 2, 3]):
        if self.deterministic:
            return torch.Tensor([0.0])
        logtwopi = np.log(2.0 * np.pi)
        return 0.5 * torch.sum(logtwopi + self.logvar + torch.pow(sample - self.mean, 2) / self.var, dim=dims)

    def mode(self):
        return self.mean
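# Illustrative usage (not part of the original module): a VAE-style posterior,
# assuming `params` packs 4 mean + 4 logvar channels along dim 1:
#
#   params = torch.randn(1, 8, 32, 32)
#   posterior = DiagonalGaussianDistribution(params)
#   z = posterior.sample()  # reparameterized draw, shape (1, 4, 32, 32)
#   kl = posterior.kl()     # KL to the standard normal, shape (1,)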
"""simple docstring"""
import re
def split_input(str_: str) -> list:
    return [char.split() for char in re.split(r"[^ a-z A-Z 0-9 \s]", str_)]


def to_simple_case(str_: str) -> str:
    string_split = split_input(str_)
    return "".join(["".join([char.capitalize() for char in sub_str]) for sub_str in string_split])


def to_complex_case(text: str, upper: bool, separator: str) -> str:
    try:
        string_split = split_input(text)
        if upper:
            res_str = "".join(
                [
                    separator.join([char.upper() for char in sub_str])
                    for sub_str in string_split
                ])
        else:
            res_str = "".join(
                [
                    separator.join([char.lower() for char in sub_str])
                    for sub_str in string_split
                ])
        return res_str
    except IndexError:
        return "not valid string"


def to_pascal_case(text: str) -> str:
    return to_simple_case(text)


def to_camel_case(text: str) -> str:
    try:
        res_str = to_simple_case(text)
        return res_str[0].lower() + res_str[1:]
    except IndexError:
        return "not valid string"


def to_snake_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "_")


def to_kebab_case(text: str, upper: bool) -> str:
    return to_complex_case(text, upper, "-")
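# Illustrative examples (not in the original file):
#
#   to_pascal_case("one two three")        # -> 'OneTwoThree'
#   to_camel_case("one two three")         # -> 'oneTwoThree'
#   to_snake_case("one two three", False)  # -> 'one_two_three'
#   to_kebab_case("one two three", True)   # -> 'ONE-TWO-THREE'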
if __name__ == "__main__":
    __import__("doctest").testmod()
"""simple docstring"""
def neville_interpolate(x_points: list, y_points: list, x0: int) -> list:
    """Interpolate and evaluate a polynomial at x0 using Neville's method."""
    n = len(x_points)
    q = [[0] * n for i in range(n)]
    for i in range(n):
        q[i][1] = y_points[i]
    for i in range(2, n):
        for j in range(i, n):
            q[j][i] = (
                (x0 - x_points[j - i + 1]) * q[j][i - 1]
                - (x0 - x_points[j]) * q[j - 1][i - 1]
            ) / (x_points[j] - x_points[j - i + 1])
    return [q[n - 1][n - 1], q]
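# Illustrative example (not in the original file): the points below lie on
# y = x + 5, so interpolating at x0 = 5 recovers 10.0 exactly:
#
#   neville_interpolate([1, 2, 3, 4, 6], [6, 7, 8, 9, 11], 5)[0]  # -> 10.0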
if __name__ == "__main__":
import doctest
    doctest.testmod()