"""simple docstring"""
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
a : Any = '''▁'''
a : List[str] = {'''vocab_file''': '''sentencepiece.bpe.model'''}
a : Tuple = {
'''vocab_file''': {
'''facebook/xglm-564M''': '''https://huggingface.co/facebook/xglm-564M/resolve/main/sentencepiece.bpe.model''',
}
}
a : Optional[int] = {
'''facebook/xglm-564M''': 2_0_4_8,
}
class a_ ( _UpperCAmelCase ):
a : int = VOCAB_FILES_NAMES
a : str = PRETRAINED_VOCAB_FILES_MAP
a : Any = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[int] = ['input_ids', 'attention_mask']
def __init__( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Any="<s>" , __UpperCamelCase : Union[str, Any]="</s>" , __UpperCamelCase : Optional[Any]="</s>" , __UpperCamelCase : List[str]="<s>" , __UpperCamelCase : Optional[int]="<unk>" , __UpperCamelCase : Union[str, Any]="<pad>" , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : List[Any] , ) ->None:
'''simple docstring'''
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
# Compatibility with the original tokenizer
_UpperCAmelCase = 7
_UpperCAmelCase = [f"""<madeupword{i}>""" for i in range(self.num_madeup_words )]
_UpperCAmelCase = kwargs.get("""additional_special_tokens""" , [] )
kwargs["additional_special_tokens"] += [
word for word in madeup_words if word not in kwargs["additional_special_tokens"]
]
super().__init__(
bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , cls_token=__UpperCamelCase , pad_token=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(__UpperCamelCase ) )
_UpperCAmelCase = vocab_file
# Original fairseq vocab and spm vocab must be "aligned":
# Vocab | 0 | 1 | 2 | 3 | 4 | 5 | 6 | 7 | 8 | 9
# -------- | ------- | ------- | ------ | ------- | --- | --- | --- | ----- | ----- | ----
# fairseq | '<s>' | '<pad>' | '</s>' | '<unk>' | ',' | '.' | '▁' | 's' | '▁de' | '-'
# spm | '<unk>' | '<s>' | '</s>' | ',' | '.' | '▁' | 's' | '▁de' | '-' | '▁a'
# The first "real" token "," has position 4 in the original fairseq vocab and position 3 in the spm vocab
_UpperCAmelCase = 1
# Mimic fairseq token-to-id alignment for the first 4 token
_UpperCAmelCase = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
_UpperCAmelCase = len(self.sp_model )
_UpperCAmelCase = {f"""<madeupword{i}>""": sp_size + i + self.fairseq_offset for i in range(self.num_madeup_words )}
self.fairseq_tokens_to_ids.update(__UpperCamelCase )
_UpperCAmelCase = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def __getstate__( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
_UpperCAmelCase = self.sp_model.serialized_model_proto()
return state
def __setstate__( self : Dict , __UpperCamelCase : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.LoadFromSerializedProto(self.sp_model_proto )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
if token_ids_a is None:
return [self.sep_token_id] + token_ids_a
_UpperCAmelCase = [self.sep_token_id]
return sep + token_ids_a + sep + sep + token_ids_a
def _snake_case ( self : Dict , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is None:
return [1] + ([0] * len(__UpperCamelCase ))
return [1] + ([0] * len(__UpperCamelCase )) + [1, 1] + ([0] * len(__UpperCamelCase ))
def _snake_case ( self : Any , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = [self.sep_token_id]
if token_ids_a is None:
return len(sep + token_ids_a ) * [0]
return len(sep + token_ids_a + sep + sep + token_ids_a ) * [0]
@property
def _snake_case ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
return len(self.sp_model ) + self.fairseq_offset + self.num_madeup_words
def _snake_case ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _snake_case ( self : Optional[int] , __UpperCamelCase : str ) ->List[str]:
'''simple docstring'''
return self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
def _snake_case ( self : List[Any] , __UpperCamelCase : Dict ) ->List[str]:
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
_UpperCAmelCase = self.sp_model.PieceToId(__UpperCamelCase )
# Need to return unknown token if the SP model returned 0
return spm_id + self.fairseq_offset if spm_id else self.unk_token_id
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(index - self.fairseq_offset )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = """""".join(__UpperCamelCase ).replace(__UpperCamelCase , """ """ ).strip()
return out_string
def _snake_case ( self : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,) | 711 |
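

# Illustrative sketch (not part of the original module): how the fairseq offset
# plays out in practice. Assumes a local `sentencepiece.bpe.model` file; the
# names below are only for illustration.
#
#   tokenizer = XGLMTokenizer("sentencepiece.bpe.model")
#   tokenizer._convert_token_to_id("<pad>")  # -> 1, from the fixed fairseq alignment
#   piece_id = tokenizer.sp_model.PieceToId("▁de")
#   tokenizer._convert_token_to_id("▁de") == piece_id + tokenizer.fairseq_offset  # True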
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a : str = True
except (ImportError, ModuleNotFoundError):
a : List[str] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
re.sub("""<n>""" , """""" , _A ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_A ) ) | 19 | 0 |
"""simple docstring"""
from dataclasses import dataclass
from typing import Tuple
import numpy as np
import torch
@dataclass
class a_ :
'''simple docstring'''
a : torch.Tensor # [batch_size x 3]
a : torch.Tensor # [batch_size x 3]
a : torch.Tensor # [batch_size x 3]
a : torch.Tensor # [batch_size x 3]
a : int
a : int
a : float
a : float
a : Tuple[int]
def _snake_case ( self : List[Any] ) ->str:
'''simple docstring'''
assert self.x.shape[0] == self.y.shape[0] == self.z.shape[0] == self.origin.shape[0]
assert self.x.shape[1] == self.y.shape[1] == self.z.shape[1] == self.origin.shape[1] == 3
assert len(self.x.shape ) == len(self.y.shape ) == len(self.z.shape ) == len(self.origin.shape ) == 2
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return torch.from_numpy(np.array([self.width, self.height] , dtype=np.floataa ) )
def _snake_case ( self : Optional[int] ) ->str:
'''simple docstring'''
return torch.from_numpy(np.array([self.x_fov, self.y_fov] , dtype=np.floataa ) )
def _snake_case ( self : str ) ->torch.Tensor:
'''simple docstring'''
_UpperCAmelCase = torch.arange(self.height * self.width )
_UpperCAmelCase = torch.stack(
[
pixel_indices % self.width,
torch.div(__UpperCamelCase , self.width , rounding_mode="""trunc""" ),
] , axis=1 , )
return coords
@property
def _snake_case ( self : int ) ->int:
'''simple docstring'''
_UpperCAmelCase ,*_UpperCAmelCase = self.shape
_UpperCAmelCase = int(np.prod(__UpperCamelCase ) )
_UpperCAmelCase = self.get_image_coords()
_UpperCAmelCase = torch.broadcast_to(coords.unsqueeze(0 ) , [batch_size * inner_batch_size, *coords.shape] )
_UpperCAmelCase = self.get_camera_rays(__UpperCamelCase )
_UpperCAmelCase = rays.view(__UpperCamelCase , inner_batch_size * self.height * self.width , 2 , 3 )
return rays
def _snake_case ( self : int , __UpperCamelCase : torch.Tensor ) ->torch.Tensor:
'''simple docstring'''
_UpperCAmelCase ,*_UpperCAmelCase ,_UpperCAmelCase = coords.shape
assert n_coords == 2
assert batch_size == self.origin.shape[0]
_UpperCAmelCase = coords.view(__UpperCamelCase , -1 , 2 )
_UpperCAmelCase = self.resolution()
_UpperCAmelCase = self.fov()
_UpperCAmelCase = (flat.float() / (res - 1)) * 2 - 1
_UpperCAmelCase = fracs * torch.tan(fov / 2 )
_UpperCAmelCase = fracs.view(__UpperCamelCase , -1 , 2 )
_UpperCAmelCase = (
self.z.view(__UpperCamelCase , 1 , 3 )
+ self.x.view(__UpperCamelCase , 1 , 3 ) * fracs[:, :, :1]
+ self.y.view(__UpperCamelCase , 1 , 3 ) * fracs[:, :, 1:]
)
_UpperCAmelCase = directions / directions.norm(dim=-1 , keepdim=__UpperCamelCase )
_UpperCAmelCase = torch.stack(
[
torch.broadcast_to(self.origin.view(__UpperCamelCase , 1 , 3 ) , [batch_size, directions.shape[1], 3] ),
directions,
] , dim=2 , )
return rays.view(__UpperCamelCase , *__UpperCamelCase , 2 , 3 )
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->"DifferentiableProjectiveCamera":
'''simple docstring'''
assert width * self.height == height * self.width, "The aspect ratio should not change."
return DifferentiableProjectiveCamera(
origin=self.origin , x=self.x , y=self.y , z=self.z , width=__UpperCamelCase , height=__UpperCamelCase , x_fov=self.x_fov , y_fov=self.y_fov , )
def _UpperCamelCase ( _A ) -> DifferentiableProjectiveCamera:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = []
for theta in np.linspace(0 , 2 * np.pi , num=2_0 ):
_UpperCAmelCase = np.array([np.sin(_A ), np.cos(_A ), -0.5] )
z /= np.sqrt(np.sum(z**2 ) )
_UpperCAmelCase = -z * 4
_UpperCAmelCase = np.array([np.cos(_A ), -np.sin(_A ), 0.0] )
_UpperCAmelCase = np.cross(_A , _A )
origins.append(_A )
xs.append(_A )
ys.append(_A )
zs.append(_A )
return DifferentiableProjectiveCamera(
origin=torch.from_numpy(np.stack(_A , axis=0 ) ).float() , x=torch.from_numpy(np.stack(_A , axis=0 ) ).float() , y=torch.from_numpy(np.stack(_A , axis=0 ) ).float() , z=torch.from_numpy(np.stack(_A , axis=0 ) ).float() , width=_A , height=_A , x_fov=0.7 , y_fov=0.7 , shape=(1, len(_A )) , ) | 712 |
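

# Minimal sanity check (illustrative, not in the original file): the pan cameras
# trace 20 poses on a circle, and `camera_rays` packs one (origin, direction)
# pair per pixel per pose. Runnable with only numpy and torch installed.
if __name__ == "__main__":
    camera = create_pan_cameras(size=8)
    rays = camera.camera_rays
    print(rays.shape)  # torch.Size([1, 20 * 8 * 8, 2, 3]): batch, rays, (origin | direction), xyz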
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a_ :
a : List[Any] = PegasusConfig
a : Dict = {}
a : List[Any] = 'gelu'
def __init__( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any=False , __UpperCamelCase : Any=99 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[Any]=20 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Tuple=0 , ) ->int:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _snake_case ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _UpperCamelCase ( _A , _A , _A , _A=None , _A=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(_A , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
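

# Toy illustration (not part of the tests): the decoder mask keeps the first
# position visible unconditionally, while the rest follows padding. Pure numpy.
if __name__ == "__main__":
    pad = 1
    toy_decoder_input_ids = np.array([[0, 5, 7, pad, pad]])
    toy_mask = np.concatenate(
        [
            np.ones(toy_decoder_input_ids[:, :1].shape, dtype=np.int8),  # position 0 always attended
            np.not_equal(toy_decoder_input_ids[:, 1:], pad).astype(np.int8),  # remaining positions mask padding
        ],
        axis=-1,
    )
    print(toy_mask)  # [[1 1 1 0 0]]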
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = FlaxPegasusModelTester(self)
        self.config_tester = ConfigTester(self, config_class=PegasusConfig)

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_use_cache_forward(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward(model_class, config, inputs_dict)

    def test_use_cache_forward_with_attn_mask(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            self.model_tester.check_use_cache_forward_with_attn_mask(model_class, config, inputs_dict)

    def test_encode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                prepared_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
                model = model_class(config)

                @jax.jit
                def encode_jitted(input_ids, attention_mask=None, **kwargs):
                    return model.encode(input_ids=input_ids, attention_mask=attention_mask)

                with self.subTest("JIT Enabled"):
                    jitted_outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = encode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    def test_decode(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            with self.subTest(model_class.__name__):
                model = model_class(config)
                encoder_outputs = model.encode(inputs_dict["input_ids"], inputs_dict["attention_mask"])

                prepared_inputs_dict = {
                    "decoder_input_ids": inputs_dict["decoder_input_ids"],
                    "decoder_attention_mask": inputs_dict["decoder_attention_mask"],
                    "encoder_outputs": encoder_outputs,
                }

                @jax.jit
                def decode_jitted(decoder_input_ids, decoder_attention_mask, encoder_outputs):
                    return model.decode(
                        decoder_input_ids=decoder_input_ids,
                        decoder_attention_mask=decoder_attention_mask,
                        encoder_outputs=encoder_outputs,
                    )

                with self.subTest("JIT Enabled"):
                    jitted_outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                with self.subTest("JIT Disabled"):
                    with jax.disable_jit():
                        outputs = decode_jitted(**prepared_inputs_dict).to_tuple()

                self.assertEqual(len(outputs), len(jitted_outputs))
                for jitted_output, output in zip(jitted_outputs, outputs):
                    self.assertEqual(jitted_output.shape, output.shape)

    @slow
    def test_model_from_pretrained(self):
        for model_class_name in self.all_model_classes:
            model = model_class_name.from_pretrained("google/pegasus-large", from_pt=True)
            input_ids = np.ones((1, 1))
            outputs = model(input_ids)
            self.assertIsNotNone(outputs)

    @slow
    def test_pegasus_xsum_summary(self):
        model = FlaxPegasusForConditionalGeneration.from_pretrained("google/pegasus-xsum")
        tokenizer = PegasusTokenizer.from_pretrained("google/pegasus-xsum")

        src_text = [
            " PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.",
            " The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" ",
        ]

        tgt_text = [
            "California's largest electricity provider has turned off power to hundreds of thousands of customers.",
            "Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.",
        ]

        inputs = tokenizer(src_text, return_tensors="np", truncation=True, max_length=512, padding=True)
        translated_tokens = model.generate(**inputs, num_beams=2).sequences
        decoded = tokenizer.batch_decode(translated_tokens, skip_special_tokens=True)
        assert tgt_text == decoded
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : List[Any] = {
'''BridgeTower/bridgetower-base''': '''https://huggingface.co/BridgeTower/bridgetower-base/blob/main/config.json''',
'''BridgeTower/bridgetower-base-itm-mlm''': (
'''https://huggingface.co/BridgeTower/bridgetower-base-itm-mlm/blob/main/config.json'''
),
}
class a_ ( _UpperCAmelCase ):
a : str = 'bridgetower_vision_model'
def __init__( self : Optional[int] , __UpperCamelCase : Union[str, Any]=7_68 , __UpperCamelCase : Optional[Any]=12 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Optional[int]=16 , __UpperCamelCase : str=2_88 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Optional[int]=1e-05 , __UpperCamelCase : Dict=False , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Tuple=False , **__UpperCamelCase : Tuple , ) ->int:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = stop_gradient
_UpperCAmelCase = share_layernorm
_UpperCAmelCase = remove_last_layer
@classmethod
def _snake_case ( cls : Tuple , __UpperCamelCase : Union[str, os.PathLike] , **__UpperCamelCase : int ) ->"PretrainedConfig":
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
_UpperCAmelCase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'bridgetower_text_model'
def __init__( self : str , __UpperCamelCase : Optional[Any]=5_02_65 , __UpperCamelCase : Optional[Any]=7_68 , __UpperCamelCase : Any=12 , __UpperCamelCase : Tuple=12 , __UpperCamelCase : int=1 , __UpperCamelCase : List[str]=30_72 , __UpperCamelCase : Union[str, Any]="gelu" , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : List[str]=5_14 , __UpperCamelCase : Dict=1 , __UpperCamelCase : Any=1e-05 , __UpperCamelCase : List[str]=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : str="absolute" , __UpperCamelCase : List[str]=True , **__UpperCamelCase : Dict , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = use_cache
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
@classmethod
def _snake_case ( cls : Optional[int] , __UpperCamelCase : Union[str, os.PathLike] , **__UpperCamelCase : List[Any] ) ->"PretrainedConfig":
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
if config_dict.get("""model_type""" ) == "bridgetower":
_UpperCAmelCase = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class a_ ( _UpperCAmelCase ):
a : Dict = 'bridgetower'
def __init__( self : Any , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any="gelu" , __UpperCamelCase : str=7_68 , __UpperCamelCase : Dict=1 , __UpperCamelCase : List[Any]=1e-05 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : str="add" , __UpperCamelCase : Union[str, Any]=12 , __UpperCamelCase : Tuple=6 , __UpperCamelCase : Any=False , __UpperCamelCase : str=False , __UpperCamelCase : Tuple=None , __UpperCamelCase : Any=None , **__UpperCamelCase : Optional[int] , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = kwargs.pop("""text_config_dict""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""vision_config_dict""" , __UpperCamelCase )
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = share_cross_modal_transformer_layers
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_size
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = share_link_tower_layers
_UpperCAmelCase = link_tower_type
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = tie_word_embeddings
_UpperCAmelCase = init_layernorm_from_vision_encoder
if text_config is None:
_UpperCAmelCase = {}
logger.info("""`text_config` is `None`. Initializing the `BridgeTowerTextConfig` with default values.""" )
if vision_config is None:
_UpperCAmelCase = {}
logger.info("""`vision_config` is `None`. Initializing the `BridgeTowerVisionConfig` with default values.""" )
_UpperCAmelCase = BridgeTowerTextConfig(**__UpperCamelCase )
_UpperCAmelCase = BridgeTowerVisionConfig(**__UpperCamelCase )
@classmethod
def _snake_case ( cls : List[str] , __UpperCamelCase : BridgeTowerTextConfig , __UpperCamelCase : BridgeTowerVisionConfig , **__UpperCamelCase : List[Any] ) ->Optional[Any]:
'''simple docstring'''
return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **__UpperCamelCase )
def _snake_case ( self : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.text_config.to_dict()
_UpperCAmelCase = self.vision_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output | 713 |
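

# Composition sketch (illustrative, not in the original file): the top-level
# config nests one sub-config per tower, and `from_text_vision_configs` simply
# round-trips their dict forms.
#
#   text_config = BridgeTowerTextConfig(vocab_size=50265)
#   vision_config = BridgeTowerVisionConfig(image_size=288)
#   config = BridgeTowerConfig.from_text_vision_configs(text_config, vision_config)
#   config.to_dict()["vision_config"]["image_size"]  # -> 288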
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : List[str]=7 , __UpperCamelCase : List[Any]=9 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : int=False , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : List[Any]=8 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=0.0_0_2 , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = encoder_seq_length
_UpperCAmelCase = decoder_seq_length
# For common tests
_UpperCAmelCase = self.decoder_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = d_ff
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = None
_UpperCAmelCase = decoder_layers
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : str=None , ) ->int:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
_UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_UpperCAmelCase ,_UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = model(__UpperCamelCase )["""last_hidden_state"""]
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_UpperCAmelCase = model(**__UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a : Any = True
a : Optional[int] = False
a : Any = False
a : Optional[int] = True
a : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a : int = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(__UpperCamelCase , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_UpperCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_UpperCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids.to(__UpperCamelCase ) )
_UpperCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase ) | 19 | 0 |
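

# Sketch of the cache-equivalence idea used by `create_and_check_decoder_model_past`
# above (illustrative; requires an instantiated decoder, so shown as comments):
# a full forward over [input_ids; next_tokens] and an incremental forward of just
# `next_tokens` with `past_key_values` must agree at the new position.
#
#   full = model(torch.cat([input_ids, next_tokens], dim=-1))["last_hidden_state"]
#   step = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]
#   torch.allclose(full[:, -1], step[:, 0], atol=1e-3)  # expected True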
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import PIL
import torch
from torchvision import transforms
from diffusers.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DDIMScheduler
from diffusers.utils import randn_tensor
a : Any = transforms.Compose(
[
transforms.Resize((2_5_6, 2_5_6)),
transforms.ToTensor(),
transforms.Normalize([0.5], [0.5]),
]
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if isinstance(_A , torch.Tensor ):
return image
elif isinstance(_A , PIL.Image.Image ):
_UpperCAmelCase = [image]
_UpperCAmelCase = [trans(img.convert("""RGB""" ) ) for img in image]
_UpperCAmelCase = torch.stack(_A )
return image
class a_ ( _UpperCAmelCase ):
def __init__( self : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Any ) ->Optional[Any]:
'''simple docstring'''
super().__init__()
# make sure scheduler can always be converted to DDIM
_UpperCAmelCase = DDIMScheduler.from_config(scheduler.config )
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
def _snake_case ( self : Any , __UpperCamelCase : Union[str, Any] ) ->Any:
'''simple docstring'''
if strength < 0 or strength > 1:
raise ValueError(f"""The value of strength should in [0.0, 1.0] but is {strength}""" )
def _snake_case ( self : Dict , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : int ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = min(int(num_inference_steps * strength ) , __UpperCamelCase )
_UpperCAmelCase = max(num_inference_steps - init_timestep , 0 )
_UpperCAmelCase = self.scheduler.timesteps[t_start:]
return timesteps, num_inference_steps - t_start
def _snake_case ( self : int , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : str=None ) ->Tuple:
'''simple docstring'''
if not isinstance(__UpperCamelCase , (torch.Tensor, PIL.Image.Image, list) ):
raise ValueError(
f"""`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(__UpperCamelCase )}""" )
_UpperCAmelCase = image.to(device=__UpperCamelCase , dtype=__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase = init_latents.shape
_UpperCAmelCase = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=__UpperCamelCase , dtype=__UpperCamelCase )
# get latents
print("""add noise to latents at timestep""" , __UpperCamelCase )
_UpperCAmelCase = self.scheduler.add_noise(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = init_latents
return latents
@torch.no_grad()
def __call__( self : int , __UpperCamelCase : Union[torch.FloatTensor, PIL.Image.Image] = None , __UpperCamelCase : float = 0.8 , __UpperCamelCase : int = 1 , __UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 50 , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
self.check_inputs(__UpperCamelCase )
# 2. Preprocess image
_UpperCAmelCase = preprocess(__UpperCamelCase )
# 3. set timesteps
self.scheduler.set_timesteps(__UpperCamelCase , device=self.device )
_UpperCAmelCase ,_UpperCAmelCase = self.get_timesteps(__UpperCamelCase , __UpperCamelCase , self.device )
_UpperCAmelCase = timesteps[:1].repeat(__UpperCamelCase )
# 4. Prepare latent variables
_UpperCAmelCase = self.prepare_latents(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , self.unet.dtype , self.device , __UpperCamelCase )
_UpperCAmelCase = latents
# 5. Denoising loop
for t in self.progress_bar(__UpperCamelCase ):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(__UpperCamelCase , __UpperCamelCase ).sample
# 2. predict previous mean of image x_t-1 and add variance depending on eta
# eta corresponds to η in paper and should be between [0, 1]
# do x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(
__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , eta=__UpperCamelCase , use_clipped_model_output=__UpperCamelCase , generator=__UpperCamelCase , ).prev_sample
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (image, latent_timestep.item())
return ImagePipelineOutput(images=__UpperCamelCase ) | 714 |
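

# Strength-to-timestep arithmetic from `get_timesteps`, shown standalone
# (illustrative): higher strength keeps more of the denoising schedule.
if __name__ == "__main__":
    num_inference_steps = 50
    for strength in (0.2, 0.8, 1.0):
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
        t_start = max(num_inference_steps - init_timestep, 0)
        print(strength, "-> runs", num_inference_steps - t_start, "of", num_inference_steps, "steps")
    # 0.2 -> runs 10 of 50 steps
    # 0.8 -> runs 40 of 50 steps
    # 1.0 -> runs 50 of 50 steps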
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCamelCase ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
_UpperCAmelCase ,_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __UpperCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(_A ) if isinstance(_A , pa.Buffer ) else pa.memory_map(_A )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.int64()}, {"""col_1""": pa.string(), """col_2""": pa.int32()}] )
def test_write(fields , writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"""col_1""": pa.string(), """col_2""": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    """simple docstring"""
    output = pa.BufferOutputStream()
    features = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
    with ArrowWriter(stream=output , features=features ) as writer:
        writer.write({"""labels""": 0} )
        writer.write({"""labels""": 1} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def test_key_datatype(writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="""split_name""" , check_duplicates=True , ) as writer:
        # InvalidKeyError comes from datasets.keyhash, imported at the top of this file
        with pytest.raises(InvalidKeyError ):
            writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
        num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def test_duplicate_keys(writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="""split_name""" , check_duplicates=True , ) as writer:
        # DuplicatedKeysError comes from datasets.keyhash, imported at the top of this file
        with pytest.raises(DuplicatedKeysError ):
            writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
            writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
        num_examples , num_bytes = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def test_write_with_keys(writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output , writer_batch_size=writer_batch_size , hash_salt="""split_name""" , check_duplicates=True , ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
        writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.int64()}, {"""col_1""": pa.string(), """col_2""": pa.int32()}] )
def test_write_batch(fields , writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
        writer.write_batch({"""col_1""": [], """col_2""": []} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"""col_1""": pa.string(), """col_2""": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.int64()}, {"""col_1""": pa.string(), """col_2""": pa.int32()}] )
def test_write_table(fields , writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"""col_1""": pa.string(), """col_2""": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
    """fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.int64()}, {"""col_1""": pa.string(), """col_2""": pa.int32()}] )
def test_write_row(fields , writer_batch_size ):
    """simple docstring"""
    output = pa.BufferOutputStream()
    schema = pa.schema(fields ) if fields else None
    with ArrowWriter(stream=output , schema=schema , writer_batch_size=writer_batch_size ) as writer:
        writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
        writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"""col_1""": pa.string(), """col_2""": pa.int64()}
    assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
    _check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"""col_1""": pa.string(), """col_2""": pa.int64()}
        output = os.path.join(tmp_dir , """test.arrow""" )
        with ArrowWriter(path=output , schema=pa.schema(fields ) ) as writer:
            writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
            num_examples , num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
        _check_output(output , 1 )
def get_base_dtype(arr_type ):
    """Recursively unwrap list types down to the primitive dtype."""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
    else:
        return arr_type
def change_first_primitive_element_in_list(lst , value ):
    """simple docstring"""
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
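# For illustration (a sketch): this helper mutates nested lists in place,
#   lst = [[1, 2], [3]]; change_first_primitive_element_in_list(lst, 9)
# leaves lst == [[9, 2], [3]] -- only the first primitive element is replaced.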
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.int64()), (Value("""int32""" ), pa.int32())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_int_type_for_typed_sequence(sequence , optimized_int_type , expected_dtype ):
    """simple docstring"""
    arr = pa.array(TypedSequence(sequence , optimized_int_type=optimized_int_type ) )
    assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    """col, expected_dtype""" , [
        ("""attention_mask""", pa.int8()),
        ("""special_tokens_mask""", pa.int8()),
        ("""token_type_ids""", pa.int8()),
        ("""input_ids""", pa.int64()),
        ("""other""", pa.int64()),
    ] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def test_optimized_typed_sequence(sequence , col , expected_dtype ):
    """simple docstring"""
    arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
    assert get_base_dtype(arr.type ) == expected_dtype
    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence )
        value = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
        change_first_primitive_element_in_list(sequence , value )
        arr = pa.array(OptimizedTypedSequence(sequence , col=col ) )
        assert get_base_dtype(arr.type ) == pa.int64()
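# In short: OptimizedTypedSequence picks a compact dtype per well-known column
# name (e.g. int8 for attention_mask) and transparently falls back to int64 once
# a value no longer fits, which is exactly what the test above asserts.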
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def test_arrow_writer_closes_stream(raise_exception , tmp_path ):
    """simple docstring"""
    path = str(tmp_path / """dataset-train.arrow""" )
    try:
        with ArrowWriter(path=path ) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs ):
    """simple docstring"""
    path = """mock://dataset-train.arrow"""
    with ArrowWriter(path=path , storage_options=mockfs.storage_options ) as writer:
        assert isinstance(writer._fs , type(mockfs ) )
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path )
def test_parquet_writer_write():
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def test_writer_embed_local_files(tmp_path , embed_local_files ):
    """simple docstring"""
    import PIL.Image
    image_path = str(tmp_path / """test_image_rgb.jpg""" )
    PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uint8 ) ).save(image_path , format="""png""" )
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output , features=Features({"""image""": Image()} ) , embed_local_files=embed_local_files ) as writer:
        writer.write({"""image""": image_path} )
        writer.finalize()
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["""image"""][0]["""path"""] , str )
        with open(image_path , """rb""" ) as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    """simple docstring"""
    non_nullable_schema = pa.schema([pa.field("""col_1""" , pa.string() , nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema )
    assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] ) | 19 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {
'''configuration_longformer''': [
'''LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''LongformerConfig''',
'''LongformerOnnxConfig''',
],
'''tokenization_longformer''': ['''LongformerTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""tokenization_longformer_fast"""] = ['''LongformerTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_longformer"""] = [
'''LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''LongformerForMaskedLM''',
'''LongformerForMultipleChoice''',
'''LongformerForQuestionAnswering''',
'''LongformerForSequenceClassification''',
'''LongformerForTokenClassification''',
'''LongformerModel''',
'''LongformerPreTrainedModel''',
'''LongformerSelfAttention''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_tf_longformer"""] = [
'''TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFLongformerForMaskedLM''',
'''TFLongformerForMultipleChoice''',
'''TFLongformerForQuestionAnswering''',
'''TFLongformerForSequenceClassification''',
'''TFLongformerForTokenClassification''',
'''TFLongformerModel''',
'''TFLongformerPreTrainedModel''',
'''TFLongformerSelfAttention''',
]
if TYPE_CHECKING:
from .configuration_longformer import (
LONGFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
LongformerConfig,
LongformerOnnxConfig,
)
from .tokenization_longformer import LongformerTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_longformer_fast import LongformerTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_longformer import (
LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
LongformerForMaskedLM,
LongformerForMultipleChoice,
LongformerForQuestionAnswering,
LongformerForSequenceClassification,
LongformerForTokenClassification,
LongformerModel,
LongformerPreTrainedModel,
LongformerSelfAttention,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_longformer import (
TF_LONGFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TFLongformerForMaskedLM,
TFLongformerForMultipleChoice,
TFLongformerForQuestionAnswering,
TFLongformerForSequenceClassification,
TFLongformerForTokenClassification,
TFLongformerModel,
TFLongformerPreTrainedModel,
TFLongformerSelfAttention,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 715 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()
DEVICE_MAPPING : Optional[dict] = None
class JaxFormatter(TensorFormatter[Mapping, 'jax.Array', Mapping] ):
    def __init__( self , features=None , device=None , **jnp_array_kwargs ) ->None:
        '''simple docstring'''
        super().__init__(features=features )
        import jax
        from jaxlib.xla_client import Device
        if isinstance(device , Device ):
            raise ValueError(
                f"""Expected {device} to be a `str` not {type(device )}, as `jaxlib.xla_extension.Device` """
                """is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
                """the device with `str()` to get its string identifier that will be internally mapped """
                """to the actual `jaxlib.xla_extension.Device`.""" )
        self.device = device if isinstance(device , str ) else str(jax.devices()[0] )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        if self.device not in list(DEVICE_MAPPING.keys() ):
            logger.warning(
                f"""Device with string identifier {self.device} not listed among the available """
                f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
                f"""device: {str(jax.devices()[0] )}.""" )
            self.device = str(jax.devices()[0] )
        self.jnp_array_kwargs = jnp_array_kwargs
    @staticmethod
    def _map_devices_to_str() ->Dict[str, "jaxlib.xla_extension.Device"]:
        '''simple docstring'''
        import jax
        return {str(device ): device for device in jax.devices()}
    def _consolidate( self , column ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(column , list ) and column:
            if all(
                isinstance(x , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
                return jnp.stack(column , axis=0 )
        return column
    def _tensorize( self , value ):
        '''simple docstring'''
        import jax
        import jax.numpy as jnp
        if isinstance(value , (str, bytes, type(None )) ):
            return value
        elif isinstance(value , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
            return value.tolist()
        default_dtype = {}
        if isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"""dtype""": jnp.int64}
            else:
                default_dtype = {"""dtype""": jnp.int32}
        elif isinstance(value , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
            default_dtype = {"""dtype""": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image
            if isinstance(value , PIL.Image.Image ):
                value = np.asarray(value )
        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
        with jax.default_device(DEVICE_MAPPING[self.device] ):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value , **{**default_dtype, **self.jnp_array_kwargs} )
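    # Typical use of this formatter, as a sketch via the public datasets API
    # (assuming a `datasets.Dataset` instance `ds`):
    #   ds = ds.with_format("jax", device=str(jax.devices()[0]))
    #   ds[0]  # -> dict of jnp arrays placed on the requested device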
    def _recursive_tensorize( self , data_struct ):
        '''simple docstring'''
        import jax
        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch
            if isinstance(data_struct , torch.Tensor ):
                return self._tensorize(data_struct.detach().cpu().numpy()[()] )
        if hasattr(data_struct , """__array__""" ) and not isinstance(data_struct , jax.Array ):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        elif isinstance(data_struct , (list, tuple) ):
            return self._consolidate([self.recursive_tensorize(substruct ) for substruct in data_struct] )
        return self._tensorize(data_struct )
    def recursive_tensorize( self , data_struct : dict ):
        '''simple docstring'''
        return map_nested(self._recursive_tensorize , data_struct , map_list=False )
    def format_row( self , pa_table : pa.Table ) ->Mapping:
        '''simple docstring'''
        row = self.numpy_arrow_extractor().extract_row(pa_table )
        row = self.python_features_decoder.decode_row(row )
        return self.recursive_tensorize(row )
    def format_column( self , pa_table : pa.Table ) ->"jax.Array":
        '''simple docstring'''
        column = self.numpy_arrow_extractor().extract_column(pa_table )
        column = self.python_features_decoder.decode_column(column , pa_table.column_names[0] )
        column = self.recursive_tensorize(column )
        column = self._consolidate(column )
        return column
    def format_batch( self , pa_table : pa.Table ) ->Mapping:
        '''simple docstring'''
        batch = self.numpy_arrow_extractor().extract_batch(pa_table )
        batch = self.python_features_decoder.decode_batch(batch )
        batch = self.recursive_tensorize(batch )
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name] )
        return batch | 19 | 0 |
"""simple docstring"""
import re
from ..utils import cached_file
# docstyle-ignore
CHAT_MESSAGE_PROMPT = '''
Human: <<task>>

Assistant: '''
DEFAULT_PROMPTS_REPO = '''huggingface-tools/default-prompts'''
PROMPT_FILES = {'''chat''': '''chat_prompt_template.txt''', '''run''': '''run_prompt_template.txt'''}
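# Usage sketch: download_prompt(None, agent_name="MyAgent") resolves to the
# default repo above and returns the contents of run_prompt_template.txt, while
# a string containing whitespace is treated as a literal prompt and returned as-is.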
def download_prompt(prompt_or_repo_id , agent_name , mode="run" ):
    """Download and return a prompt template, or pass a literal prompt through."""
    if prompt_or_repo_id is None:
        prompt_or_repo_id = DEFAULT_PROMPTS_REPO
    # prompt is considered a repo ID when it does not contain any kind of space
    if re.search("""\\s""" , prompt_or_repo_id ) is not None:
        return prompt_or_repo_id
    prompt_file = cached_file(
        prompt_or_repo_id , PROMPT_FILES[mode] , repo_type="""dataset""" , user_agent={"""agent""": agent_name} )
    with open(prompt_file , """r""" , encoding="""utf-8""" ) as f:
        return f.read() | 716 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["""modeling_pegasus_x"""] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 19 | 0 |
"""simple docstring"""
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig, OnnxSeq2SeqConfigWithPast
from ...utils import logging
if TYPE_CHECKING:
from ...feature_extraction_utils import FeatureExtractionMixin
from ...tokenization_utils_base import PreTrainedTokenizerBase
from ...utils import TensorType
logger = logging.get_logger(__name__)
WHISPER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''openai/whisper-base''': '''https://huggingface.co/openai/whisper-base/resolve/main/config.json''',
}
# fmt: off
NON_SPEECH_TOKENS = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_7, 3_6_6, 4_3_8, 5_3_2, 6_8_5,
7_0_5, 7_9_6, 9_3_0, 1_0_5_8, 1_2_2_0, 1_2_6_7, 1_2_7_9, 1_3_0_3, 1_3_4_3, 1_3_7_7,
1_3_9_1, 1_6_3_5, 1_7_8_2, 1_8_7_5, 2_1_6_2, 2_3_6_1, 2_4_8_8, 3_4_6_7, 4_0_0_8, 4_2_1_1,
4_6_0_0, 4_8_0_8, 5_2_9_9, 5_8_5_5, 6_3_2_9, 7_2_0_3, 9_6_0_9, 9_9_5_9, 1_0_5_6_3, 1_0_7_8_6,
1_1_4_2_0, 1_1_7_0_9, 1_1_9_0_7, 1_3_1_6_3, 1_3_6_9_7, 1_3_7_0_0, 1_4_8_0_8, 1_5_3_0_6, 1_6_4_1_0, 1_6_7_9_1,
1_7_9_9_2, 1_9_2_0_3, 1_9_5_1_0, 2_0_7_2_4, 2_2_3_0_5, 2_2_9_3_5, 2_7_0_0_7, 3_0_1_0_9, 3_0_4_2_0, 3_3_4_0_9,
3_4_9_4_9, 4_0_2_8_3, 4_0_4_9_3, 4_0_5_4_9, 4_7_2_8_2, 4_9_1_4_6, 5_0_2_5_7, 5_0_3_5_9, 5_0_3_6_0, 5_0_3_6_1
]
NON_SPEECH_TOKENS_MULTI = [
1, 2, 7, 8, 9, 1_0, 1_4, 2_5,
2_6, 2_7, 2_8, 2_9, 3_1, 5_8, 5_9, 6_0, 6_1, 6_2,
6_3, 9_0, 9_1, 9_2, 9_3, 3_5_9, 5_0_3, 5_2_2, 5_4_2, 8_7_3,
8_9_3, 9_0_2, 9_1_8, 9_2_2, 9_3_1, 1_3_5_0, 1_8_5_3, 1_9_8_2, 2_4_6_0, 2_6_2_7,
3_2_4_6, 3_2_5_3, 3_2_6_8, 3_5_3_6, 3_8_4_6, 3_9_6_1, 4_1_8_3, 4_6_6_7, 6_5_8_5, 6_6_4_7,
7_2_7_3, 9_0_6_1, 9_3_8_3, 1_0_4_2_8, 1_0_9_2_9, 1_1_9_3_8, 1_2_0_3_3, 1_2_3_3_1, 1_2_5_6_2, 1_3_7_9_3,
1_4_1_5_7, 1_4_6_3_5, 1_5_2_6_5, 1_5_6_1_8, 1_6_5_5_3, 1_6_6_0_4, 1_8_3_6_2, 1_8_9_5_6, 2_0_0_7_5, 2_1_6_7_5,
2_2_5_2_0, 2_6_1_3_0, 2_6_1_6_1, 2_6_4_3_5, 2_8_2_7_9, 2_9_4_6_4, 3_1_6_5_0, 3_2_3_0_2, 3_2_4_7_0, 3_6_8_6_5,
4_2_8_6_3, 4_7_4_2_5, 4_9_8_7_0, 5_0_2_5_4, 5_0_2_5_8, 5_0_3_6_0, 5_0_3_6_1, 5_0_3_6_2
]
class WhisperConfig(PretrainedConfig ):
    model_type = 'whisper'
    keys_to_ignore_at_inference = ['past_key_values']
    attribute_map = {'num_attention_heads': 'encoder_attention_heads', 'hidden_size': 'd_model'}
    def __init__( self , vocab_size=5_18_65 , num_mel_bins=80 , encoder_layers=6 , encoder_attention_heads=4 , decoder_layers=6 , decoder_attention_heads=4 , decoder_ffn_dim=15_36 , encoder_ffn_dim=15_36 , encoder_layerdrop=0.0 , decoder_layerdrop=0.0 , decoder_start_token_id=5_02_57 , use_cache=True , is_encoder_decoder=True , activation_function="gelu" , d_model=2_56 , dropout=0.0 , attention_dropout=0.0 , activation_dropout=0.0 , init_std=0.0_2 , scale_embedding=False , max_source_positions=15_00 , max_target_positions=4_48 , pad_token_id=5_02_56 , bos_token_id=5_02_56 , eos_token_id=5_02_56 , suppress_tokens=None , begin_suppress_tokens=[2_20, 5_02_56] , use_weighted_layer_sum=False , classifier_proj_size=2_56 , apply_spec_augment=False , mask_time_prob=0.0_5 , mask_time_length=10 , mask_time_min_masks=2 , mask_feature_prob=0.0 , mask_feature_length=10 , mask_feature_min_masks=0 , median_filter_width=7 , **kwargs , ) ->None:
'''simple docstring'''
        self.vocab_size = vocab_size
        self.num_mel_bins = num_mel_bins
        self.d_model = d_model
        self.encoder_layers = encoder_layers
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_layers = decoder_layers
        self.decoder_attention_heads = decoder_attention_heads
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_ffn_dim = encoder_ffn_dim
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.activation_function = activation_function
        self.init_std = init_std
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.use_cache = use_cache
        self.num_hidden_layers = encoder_layers
        self.scale_embedding = scale_embedding  # scale factor will be sqrt(d_model) if True
        self.max_source_positions = max_source_positions
        self.max_target_positions = max_target_positions
        # Audio Classification-specific parameters. Feel free to ignore for other classes.
        self.classifier_proj_size = classifier_proj_size
        self.use_weighted_layer_sum = use_weighted_layer_sum
        # fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
        self.apply_spec_augment = apply_spec_augment
        self.mask_time_prob = mask_time_prob
        self.mask_time_length = mask_time_length
        self.mask_time_min_masks = mask_time_min_masks
        self.mask_feature_prob = mask_feature_prob
        self.mask_feature_length = mask_feature_length
        self.mask_feature_min_masks = mask_feature_min_masks
        self.median_filter_width = median_filter_width
        super().__init__(
            pad_token_id=pad_token_id , bos_token_id=bos_token_id , eos_token_id=eos_token_id , is_encoder_decoder=is_encoder_decoder , decoder_start_token_id=decoder_start_token_id , suppress_tokens=suppress_tokens , begin_suppress_tokens=begin_suppress_tokens , **kwargs , )
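# Minimal usage sketch (assuming this mirrors transformers' WhisperConfig):
#   config = WhisperConfig(d_model=2_56, encoder_layers=6, decoder_layers=6)
#   config.num_hidden_layers  # -> 6, mirrored from encoder_layers above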
class WhisperOnnxConfig(OnnxSeq2SeqConfigWithPast ):
    @property
    def inputs( self ) ->Mapping[str, Mapping[int, str]]:
        '''simple docstring'''
        common_inputs = OrderedDict(
            [
                ("""input_features""", {0: """batch""", 1: """feature_size""", 2: """encoder_sequence"""}),
            ] )
        if self.use_past:
            common_inputs["""decoder_input_ids"""] = {0: """batch"""}
        else:
            common_inputs["""decoder_input_ids"""] = {0: """batch""", 1: """decoder_sequence"""}
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs , direction="""inputs""" )
        return common_inputs
    def generate_dummy_inputs( self , preprocessor : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] , batch_size : int = -1 , seq_length : int = -1 , is_pair : bool = False , framework : Optional["TensorType"] = None , sampling_rate : int = 2_20_50 , time_duration : float = 5.0 , frequency : int = 2_20 , ) ->Mapping[str, Any]:
        '''simple docstring'''
        dummy_inputs = OrderedDict()
        encoder_inputs = OnnxConfig.generate_dummy_inputs(
            self , preprocessor=preprocessor.feature_extractor , batch_size=batch_size , framework=framework , sampling_rate=sampling_rate , time_duration=time_duration , frequency=frequency , )
        encoder_sequence_length = encoder_inputs["""input_features"""].shape[2]
        seq_length = encoder_sequence_length // 2 if self.use_past else seq_length
        decoder_inputs = super().generate_dummy_inputs(
            preprocessor.tokenizer , batch_size , seq_length , is_pair , framework )
        dummy_inputs["""input_features"""] = encoder_inputs.pop("""input_features""" )
        dummy_inputs["""decoder_input_ids"""] = decoder_inputs.pop("""decoder_input_ids""" )
        if "past_key_values" in decoder_inputs:
            dummy_inputs["""past_key_values"""] = decoder_inputs.pop("""past_key_values""" )
        return dummy_inputs
    @property
    def atol_for_validation( self ) ->float:
        '''simple docstring'''
        return 1e-3 | 717 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def handle_test_results(test_results ):
    """simple docstring"""
    expressions = test_results.split(""" """ )
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions ):
        if "failed" in expression:
            failed += int(expressions[i - 1] )
        if "passed" in expression:
            success += int(expressions[i - 1] )
    return failed, success, time_spent
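# For instance (a sketch): handle_test_results("2 failed, 10 passed in 3.45s")
# returns (2, 10, "3.45s") -- each count is read from the token preceding the
# "failed"/"passed" keyword in the pytest summary line.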
def extract_first_line_failure(failures_short_lines ):
    """simple docstring"""
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("""\n""" ):
        if re.search(R"""_ \[doctest\]""" , line ):
            in_error = True
            file = line.split(""" """ )[2]
        elif in_error and not line.split(""" """ )[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__( self , title : str , doc_test_results : Dict ) ->None:
        '''simple docstring'''
        self.title = title
        self._time_spent = doc_test_results["""time_spent"""].split(""",""" )[0]
        self.n_success = doc_test_results["""success"""]
        self.n_failures = doc_test_results["""failures"""]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
        self.thread_ts = None
@property
    def time( self ) ->str:
        '''simple docstring'''
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(""":""" )
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts ) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours , minutes , seconds = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
            total_secs += hours * 36_00 + minutes * 60 + seconds
        hours , minutes , seconds = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
        return f"""{int(hours )}h{int(minutes )}m{int(seconds )}s"""
@property
    def header( self ) ->Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures( self ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
    def failures( self ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
    def category_failures( self ) ->Dict:
        '''simple docstring'''
        line_length = 40
        category_failures = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(v , dict )}
        report = """"""
        for category, failures in category_failures.items():
            if len(failures ) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
            report += "`"
            report += "`\n`".join(failures )
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
    def payload( self ) ->str:
        '''simple docstring'''
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures )
        if self.n_failures > 0:
            blocks.extend([self.category_failures] )
        if self.n_failures == 0:
            blocks.append(self.no_failures )
        return json.dumps(blocks )
@staticmethod
    def error_out() ->None:
        '''simple docstring'''
        payload = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
        print("""Sending the following payload""" )
        print(json.dumps({"""blocks""": payload} ) )
        client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=payload , )
    def post( self ) ->None:
        '''simple docstring'''
        print("""Sending the following payload""" )
        print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
        text = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=text , )
    def get_reply_blocks( self , job_name , job_link , failures , text ):
        '''simple docstring'''
        failures_text = """"""
        for key, value in failures.items():
            value = value[:2_00] + """ [Truncated]""" if len(value ) > 2_50 else value
            failures_text += f"""*{key}*\n_{value}_\n\n"""
        title = job_name
        content = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
        if job_link is not None:
            content["""accessory"""] = {
                """type""": """button""",
                """text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
                """url""": job_link,
            }
        return [
            {"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
            content,
            {"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
        ]
    def post_reply( self ) ->None:
        '''simple docstring'''
        if self.thread_ts is None:
            raise ValueError("""Can only post reply if a post has been made.""" )
        job_link = self.doc_test_results.pop("""job_link""" )
        self.doc_test_results.pop("""failures""" )
        self.doc_test_results.pop("""success""" )
        self.doc_test_results.pop("""time_spent""" )
        sorted_dict = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
        for job, job_result in sorted_dict:
            if len(job_result["""failures"""] ):
                text = f"""*Num failures* :{len(job_result["failed"] )} \n"""
                failures = job_result["""failures"""]
                blocks = self.get_reply_blocks(job , job_link , failures , text=text )
                print("""Sending the following reply""" )
                print(json.dumps({"""blocks""": blocks} ) )
                client.chat_postMessage(
                    channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f"""Results for {job}""" , blocks=blocks , thread_ts=self.thread_ts["""ts"""] , )
                time.sleep(1 )
def get_job_links():
    """simple docstring"""
    run_id = os.environ["""GITHUB_RUN_ID"""]
    url = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
    result = requests.get(url ).json()
    jobs = {}
    try:
        jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        pages_to_iterate_over = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
        for i in range(pages_to_iterate_over ):
            result = requests.get(url + F"""&page={i + 2}""" ).json()
            jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
        return jobs
    except Exception as e:
        print("""Unknown error, could not fetch links.""" , e )
        return {}
def retrieve_artifact(name ):
    """simple docstring"""
    _artifact = {}
    if os.path.exists(name ):
        files = os.listdir(name )
        for file in files:
            try:
                with open(os.path.join(name , file ) , encoding="""utf-8""" ) as f:
                    _artifact[file.split(""".""" )[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(F"""Could not open {os.path.join(name , file )}.""" ) from e
    return _artifact
def retrieve_available_artifacts():
    """simple docstring"""
    class Artifact:
        def __init__( self , name : str ) ->None:
            '''simple docstring'''
            self.name = name
            self.paths = []
        def __str__( self ) ->str:
            '''simple docstring'''
            return self.name
        def add_path( self , path : str ) ->None:
            '''simple docstring'''
            self.paths.append({"""name""": self.name, """path""": path} )
    _available_artifacts = {}
    directories = filter(os.path.isdir , os.listdir() )
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name )
        _available_artifacts[artifact_name].add_path(directory )
    return _available_artifacts
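# Sketch of the returned mapping: {"doc_tests_gpu_test_reports": Artifact(...), ...},
# where each Artifact.paths is a list of {"name": ..., "path": ...} dicts pointing
# at artifact directories that the CI workflow downloaded into the working directory.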
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()
    docs = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
    # Link to the GitHub Action job
    doc_test_results['''job_link'''] = github_actions_job_links.get('''run_doctests''')
    artifact_path = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
    artifact = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
        failed , success , time_spent = handle_test_results(artifact['''stats'''])
        doc_test_results['''failures'''] = failed
        doc_test_results['''success'''] = success
        doc_test_results['''time_spent'''] = time_spent[1:-1] + ''', '''
        all_failures = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
                line = line.replace('''FAILED ''', '''''')
                line = line.split()[0].replace('''\n''', '''''')
if "::" in line:
                    file_path , test = line.split('''::''')
else:
                    file_path , test = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
doc_test_results[category]["failed"].append(test)
                        failure = all_failures[test] if test in all_failures else '''N/A'''
                        doc_test_results[category]['''failures'''][test] = failure
break
    message = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply() | 19 | 0 |
import numpy as np
class IndexCalculation:
    def __init__( self , red=None , green=None , blue=None , red_edge=None , nir=None ) ->None:
        '''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
    def set_matricies( self , red=None , green=None , blue=None , red_edge=None , nir=None ) ->bool:
        '''simple docstring'''
        if red is not None:
            self.red = red
        if green is not None:
            self.green = green
        if blue is not None:
            self.blue = blue
        if red_edge is not None:
            self.redEdge = red_edge
        if nir is not None:
            self.nir = nir
        return True
    def calculation( self , index="" , red=None , green=None , blue=None , red_edge=None , nir=None ):
        '''simple docstring'''
        self.set_matricies(red=red , green=green , blue=blue , red_edge=red_edge , nir=nir )
_UpperCAmelCase = {
"""ARVI2""": self.arvaa,
"""CCCI""": self.ccci,
"""CVI""": self.cvi,
"""GLI""": self.gli,
"""NDVI""": self.ndvi,
"""BNDVI""": self.bndvi,
"""redEdgeNDVI""": self.red_edge_ndvi,
"""GNDVI""": self.gndvi,
"""GBNDVI""": self.gbndvi,
"""GRNDVI""": self.grndvi,
"""RBNDVI""": self.rbndvi,
"""PNDVI""": self.pndvi,
"""ATSAVI""": self.atsavi,
"""BWDRVI""": self.bwdrvi,
"""CIgreen""": self.ci_green,
"""CIrededge""": self.ci_rededge,
"""CI""": self.ci,
"""CTVI""": self.ctvi,
"""GDVI""": self.gdvi,
"""EVI""": self.evi,
"""GEMI""": self.gemi,
"""GOSAVI""": self.gosavi,
"""GSAVI""": self.gsavi,
"""Hue""": self.hue,
"""IVI""": self.ivi,
"""IPVI""": self.ipvi,
"""I""": self.i,
"""RVI""": self.rvi,
"""MRVI""": self.mrvi,
"""MSAVI""": self.m_savi,
"""NormG""": self.norm_g,
"""NormNIR""": self.norm_nir,
"""NormR""": self.norm_r,
"""NGRDI""": self.ngrdi,
"""RI""": self.ri,
"""S""": self.s,
"""IF""": self._if,
"""DVI""": self.dvi,
"""TVI""": self.tvi,
"""NDRE""": self.ndre,
}
try:
return funcs[index]()
except KeyError:
print("""Index not in the list!""" )
return False
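    # Example use, as a sketch:
    #   idx = IndexCalculation(red=np.array([[50.0]]), nir=np.array([[100.0]]))
    #   idx.calculation("NDVI")  # -> array([[0.33333333]])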
    def arv12( self ):
        '''simple docstring'''
        return -0.1_8 + (1.1_7 * ((self.nir - self.red) / (self.nir + self.red)))
    def ccci( self ):
        '''simple docstring'''
        return ((self.nir - self.redEdge) / (self.nir + self.redEdge)) / (
            (self.nir - self.red) / (self.nir + self.red)
        )
    def cvi( self ):
        '''simple docstring'''
        return self.nir * (self.red / (self.green**2))
    def gli( self ):
        '''simple docstring'''
        return (2 * self.green - self.red - self.blue) / (
            2 * self.green + self.red + self.blue
        )
    def ndvi( self ):
        '''simple docstring'''
        return (self.nir - self.red) / (self.nir + self.red)
    def bndvi( self ):
        '''simple docstring'''
        return (self.nir - self.blue) / (self.nir + self.blue)
    def red_edge_ndvi( self ):
        '''simple docstring'''
        return (self.redEdge - self.red) / (self.redEdge + self.red)
    def gndvi( self ):
        '''simple docstring'''
        return (self.nir - self.green) / (self.nir + self.green)
    def gbndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.green + self.blue)) / (
            self.nir + (self.green + self.blue)
        )
    def grndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.green + self.red)) / (
            self.nir + (self.green + self.red)
        )
    def rbndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.blue + self.red)) / (self.nir + (self.blue + self.red))
    def pndvi( self ):
        '''simple docstring'''
        return (self.nir - (self.green + self.red + self.blue)) / (
            self.nir + (self.green + self.red + self.blue)
        )
    def atsavi( self , x=0.0_8 , a=1.2_2 , b=0.0_3 ):
        '''simple docstring'''
        return a * (
            (self.nir - a * self.red - b)
            / (a * self.nir + self.red - a * b + x * (1 + a**2))
        )
    def bwdrvi( self ):
        '''simple docstring'''
        return (0.1 * self.nir - self.blue) / (0.1 * self.nir + self.blue)
    def ci_green( self ):
        '''simple docstring'''
        return (self.nir / self.green) - 1
    def ci_rededge( self ):
        '''simple docstring'''
        return (self.nir / self.redEdge) - 1
    def ci( self ):
        '''simple docstring'''
        return (self.red - self.blue) / self.red
    def ctvi( self ):
        '''simple docstring'''
        ndvi = self.ndvi()
        return ((ndvi + 0.5) / (abs(ndvi + 0.5 ))) * (abs(ndvi + 0.5 ) ** (1 / 2))
    def gdvi( self ):
        '''simple docstring'''
        return self.nir - self.green
    def evi( self ):
        '''simple docstring'''
        return 2.5 * (
            (self.nir - self.red) / (self.nir + 6 * self.red - 7.5 * self.blue + 1)
        )
    def gemi( self ):
        '''simple docstring'''
        n = (2 * (self.nir**2 - self.red**2) + 1.5 * self.nir + 0.5 * self.red) / (
            self.nir + self.red + 0.5
        )
        return n * (1 - 0.2_5 * n) - (self.red - 0.1_2_5) / (1 - self.red)
    def gosavi( self , y=0.1_6 ):
        '''simple docstring'''
        return (self.nir - self.green) / (self.nir + self.green + y)
    def gsavi( self , n=0.5 ):
        '''simple docstring'''
        return ((self.nir - self.green) / (self.nir + self.green + n)) * (1 + n)
    def hue( self ):
        '''simple docstring'''
        return np.arctan(
            ((2 * self.red - self.green - self.blue) / 30.5) * (self.green - self.blue) )
    def ivi( self , a=None , b=None ):
        '''simple docstring'''
        return (self.nir - b) / (a * self.red)
    def ipvi( self ):
        '''simple docstring'''
        return (self.nir / ((self.nir + self.red) / 2)) * (self.ndvi() + 1)
    def i( self ):
        '''simple docstring'''
        return (self.red + self.green + self.blue) / 30.5
    def rvi( self ):
        '''simple docstring'''
        return self.nir / self.red
    def mrvi( self ):
        '''simple docstring'''
        return (self.rvi() - 1) / (self.rvi() + 1)
    def m_savi( self ):
        '''simple docstring'''
        return (
            (2 * self.nir + 1)
            - ((2 * self.nir + 1) ** 2 - 8 * (self.nir - self.red)) ** (1 / 2)
        ) / 2
    def norm_g( self ):
        '''simple docstring'''
        return self.green / (self.nir + self.red + self.green)
    def norm_nir( self ):
        '''simple docstring'''
        return self.nir / (self.nir + self.red + self.green)
    def norm_r( self ):
        '''simple docstring'''
        return self.red / (self.nir + self.red + self.green)
    def ngrdi( self ):
        '''simple docstring'''
        return (self.green - self.red) / (self.green + self.red)
    def ri( self ):
        '''simple docstring'''
        return (self.red - self.green) / (self.red + self.green)
    def s( self ):
        '''simple docstring'''
        max_value = np.max([np.max(self.red ), np.max(self.green ), np.max(self.blue )] )
        min_value = np.min([np.min(self.red ), np.min(self.green ), np.min(self.blue )] )
        return (max_value - min_value) / max_value
    def _if( self ):
        '''simple docstring'''
        return (2 * self.red - self.green - self.blue) / (self.green - self.blue)
    def dvi( self ):
        '''simple docstring'''
        return self.nir / self.red
    def tvi( self ):
        '''simple docstring'''
        return (self.ndvi() + 0.5) ** (1 / 2)
    def ndre( self ):
        '''simple docstring'''
        return (self.nir - self.redEdge) / (self.nir + self.redEdge) | 718 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key , default=False ):
    """simple docstring"""
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value )
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(F"""If set, {key} must be yes or no.""" )
    return _value
_run_slow_tests = parse_flag_from_env('''RUN_SLOW''', default=False)
_run_remote_tests = parse_flag_from_env('''RUN_REMOTE''', default=False)
_run_local_tests = parse_flag_from_env('''RUN_LOCAL''', default=True)
_run_packaged_tests = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def require_faiss(test_case ):
    """simple docstring"""
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires faiss""" )(test_case )
    return test_case
def require_regex(test_case ):
    """simple docstring"""
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires regex""" )(test_case )
    return test_case
def require_elasticsearch(test_case ):
    """simple docstring"""
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires elasticsearch""" )(test_case )
    return test_case
def require_sqlalchemy(test_case ):
    """simple docstring"""
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("""test requires sqlalchemy""" )(test_case )
    return test_case
def require_torch(test_case ):
    """simple docstring"""
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("""test requires PyTorch""" )(test_case )
    return test_case
def require_tf(test_case ):
    """simple docstring"""
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("""test requires TensorFlow""" )(test_case )
    return test_case
def require_jax(test_case ):
    """simple docstring"""
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("""test requires JAX""" )(test_case )
    return test_case
def require_pil(test_case ):
    """simple docstring"""
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("""test requires Pillow""" )(test_case )
    return test_case
def require_transformers(test_case ):
    """simple docstring"""
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("""test requires transformers""" )(test_case )
    else:
        return test_case
def require_tiktoken(test_case ):
    """simple docstring"""
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("""test requires tiktoken""" )(test_case )
    else:
        return test_case
def require_spacy(test_case ):
    """simple docstring"""
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("""test requires spacy""" )(test_case )
    else:
        return test_case
def require_spacy_model(model ):
    """simple docstring"""
    def _require_spacy_model(test_case ):
        try:
            import spacy  # noqa F401
            spacy.load(model )
        except ImportError:
            return unittest.skip("""test requires spacy""" )(test_case )
        except OSError:
            return unittest.skip("""test requires spacy model '{}'""".format(model ) )(test_case )
        else:
            return test_case
    return _require_spacy_model
def require_pyspark(test_case ):
    """simple docstring"""
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires pyspark""" )(test_case )
    else:
        return test_case
def require_joblibspark(test_case ):
    """simple docstring"""
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("""test requires joblibspark""" )(test_case )
    else:
        return test_case
def slow(test_case ):
    """simple docstring"""
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("""test is slow""" )(test_case )
    return test_case
def local(test_case ):
    """simple docstring"""
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("""test is local""" )(test_case )
    return test_case
def packaged(test_case ):
    """simple docstring"""
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("""test is packaged""" )(test_case )
    return test_case
def remote(test_case ):
    """simple docstring"""
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("""test requires remote""" )(test_case )
    return test_case
def for_all_test_methods(*decorators ):
    """simple docstring"""
    def decorate(cls ):
        for name, fn in cls.__dict__.items():
            if callable(fn ) and name.startswith("""test""" ):
                for decorator in decorators:
                    fn = decorator(fn )
                setattr(cls , name , fn )
        return cls
    return decorate
class RequestWouldHangIndefinitelyError(Exception ):
    pass
class OfflineSimulationMode(Enum ):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS , timeout=1e-16 ):
    """simple docstring"""
    online_request = requests.Session().request
    def timeout_request(session , method , url , **kwargs ):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = """https://10.255.255.1"""
        if kwargs.get("""timeout""" ) is None:
            raise RequestWouldHangIndefinitelyError(
                F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
        kwargs["""timeout"""] = timeout
        try:
            return online_request(method , invalid_url , **kwargs )
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
            e.args = (max_retry_error,)
            raise
    def raise_connection_error(session , prepared_request , **kwargs ):
        raise requests.ConnectionError("""Offline mode is enabled.""" , request=prepared_request )
    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("""requests.Session.send""" , raise_connection_error ):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("""requests.Session.request""" , timeout_request ):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("""datasets.config.HF_DATASETS_OFFLINE""" , True ):
            yield
    else:
        raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
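# Usage sketch:
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT):
#       ...  # any requests call in here fails fast instead of hanging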
@contextmanager
def set_current_working_directory_to_temp_dir(*args , **kwargs ):
    """simple docstring"""
    original_working_dir = str(Path().resolve() )
    with tempfile.TemporaryDirectory(*args , **kwargs ) as tmp_dir:
        try:
            os.chdir(tmp_dir )
            yield
        finally:
            os.chdir(original_working_dir )
@contextmanager
def assert_arrow_memory_increases():
    """simple docstring"""
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def assert_arrow_memory_doesnt_increase():
    """simple docstring"""
    import gc
    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1 , rng2 ):
    """simple docstring"""
    return deepcopy(rng1 ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(rng2 ).integers(0 , 1_0_0 , 1_0 ).tolist()
def xfail_if_500_502_http_error(func ):
    """simple docstring"""
    import decorator
    from requests.exceptions import HTTPError
    def _wrapper(func , *args , **kwargs ):
        try:
            return func(*args , **kwargs )
        except HTTPError as err:
            if str(err ).startswith("""500""" ) or str(err ).startswith("""502""" ):
                pytest.xfail(str(err ) )
            raise err
    return decorator.decorator(_wrapper , func )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(_A )
if not quiet:
print(_A , _A , file=_A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , _A , 0 , re.M )
return int(_A )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta | 19 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Tuple = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 719 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( _UpperCAmelCase ):
a : List[Any] = ''
a : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Tuple , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[str] = None , **__UpperCamelCase : Any , ) ->Any:
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def _snake_case ( self : List[str] ) ->List[str]:
'''simple docstring'''
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str = "rb" , **__UpperCamelCase : Any , ) ->List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
_UpperCAmelCase = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _snake_case ( self : int , __UpperCamelCase : int , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=False , **__UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip("""/""" ) )
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip("""/""" ) )
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 19 | 0 |
import torch
from diffusers import DPMSolverSDEScheduler
from diffusers.utils import torch_device
from diffusers.utils.testing_utils import require_torchsde
from .test_schedulers import SchedulerCommonTest
@require_torchsde
class a_ ( _UpperCAmelCase ):
a : Union[str, Any] = (DPMSolverSDEScheduler,)
a : Any = 10
def _snake_case ( self : Optional[int] , **__UpperCamelCase : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = {
"""num_train_timesteps""": 11_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""noise_sampler_seed""": 0,
}
config.update(**__UpperCamelCase )
return config
def _snake_case ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
for timesteps in [10, 50, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def _snake_case ( self : Dict ) ->Any:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_0_1, 0.0_0_0_1, 0.0_0_1] , [0.0_0_0_2, 0.0_0_2, 0.0_2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
for schedule in ["linear", "scaled_linear"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
for prediction_type in ["epsilon", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def _snake_case ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase = sample.to(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.47_82_10_44_92_18_75 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_7_8_7_0_5_9_6_4_5_6_5_2_7_7 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_21_11_81_64_06 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_6_8_9_2_2_9_9_6_5_2 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter * scheduler.init_noise_sigma
_UpperCAmelCase = sample.to(__UpperCamelCase )
for i, t in enumerate(scheduler.timesteps ):
_UpperCAmelCase = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_24.77_14_92_00_43_94_53 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_2_2_6_2_8_9_0_1_4_8_1_6_2_8_4 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_28.1_66_33_60_59_57_03 ) < 1e-2
assert abs(result_mean.item() - 0.1_6_6_8_8_3_2_6_0_0_1_1_6_7_2_9_7 ) < 1e-3
else:
assert abs(result_sum.item() - 1_19.8_48_75_48_82_81_25 ) < 1e-2
assert abs(result_mean.item() - 0.1_5_6_0_5_3_0_6_6_2_5_3_6_6_2_1 ) < 1e-3
def _snake_case ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCamelCase )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.to(__UpperCamelCase ) * scheduler.init_noise_sigma
for t in scheduler.timesteps:
_UpperCAmelCase = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_67.46_95_73_97_46_09_38 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_8_0_5_9_3_4_6_0_7_9_8_2_6_3_5 ) < 1e-3
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_71.59_35_36_37_69_53_12 ) < 1e-2
assert abs(result_mean.item() - 0.2_2_3_4_2_9_0_8_3_8_2_4_1_5_7_7_1 ) < 1e-3
else:
assert abs(result_sum.item() - 1_62.52_38_34_22_85_15_62 ) < 1e-2
assert abs(result_mean.item() - 0.2_1_1_6_1_9_5_7_0_8_5_1_3_2_6 ) < 1e-3
def _snake_case ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase , use_karras_sigmas=__UpperCamelCase )
scheduler.set_timesteps(self.num_inference_steps , device=__UpperCamelCase )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter.to(__UpperCamelCase ) * scheduler.init_noise_sigma
_UpperCAmelCase = sample.to(__UpperCamelCase )
for t in scheduler.timesteps:
_UpperCAmelCase = scheduler.scale_model_input(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = output.prev_sample
_UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
if torch_device in ["mps"]:
assert abs(result_sum.item() - 1_76.66_97_41_35_74_21_88 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
elif torch_device in ["cuda"]:
assert abs(result_sum.item() - 1_77.63_65_35_64_45_31_25 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2
else:
assert abs(result_sum.item() - 1_70.3_13_52_23_38_86_72 ) < 1e-2
assert abs(result_mean.item() - 0.2_3_0_0_3_8_7_2_7_3_0_9_8_1_8_1_1 ) < 1e-2 | 720 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Any = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : List[Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=[1, 10, 1_00] , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple=3.0 ) ->Union[str, Any]:
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase ,_UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
def estimator(_A , _A , _A ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_A , _A ):
_UpperCAmelCase = itertools.repeat(_A , len(_A ) )
else:
assert len(_A ) == len(_A )
_UpperCAmelCase = iter(_A )
return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] ) | 19 | 0 |
from functools import lru_cache
@lru_cache
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if num < 0:
raise ValueError("""Number should not be negative.""" )
return 1 if num in (0, 1) else num * factorial(num - 1 )
if __name__ == "__main__":
import doctest
doctest.testmod() | 721 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 19 | 0 |
"""simple docstring"""
import datasets
import faiss
import numpy as np
import streamlit as st
import torch
from elasticsearch import Elasticsearch
from elia_utils import (
embed_questions_for_retrieval,
make_qa_sas_model,
qa_sas_generate,
query_es_index,
query_qa_dense_index,
)
import transformers
from transformers import AutoModel, AutoModelForSeqaSeqLM, AutoTokenizer
a : int = '''bart'''
a : str = True
@st.cache(allow_output_mutation=_A )
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCAmelCase = AutoTokenizer.from_pretrained("""yjernite/retribert-base-uncased""" )
_UpperCAmelCase = AutoModel.from_pretrained("""yjernite/retribert-base-uncased""" ).to("""cuda:0""" )
_UpperCAmelCase = qar_model.eval()
else:
_UpperCAmelCase ,_UpperCAmelCase = (None, None)
if MODEL_TYPE == "bart":
_UpperCAmelCase = AutoTokenizer.from_pretrained("""yjernite/bart_eli5""" )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained("""yjernite/bart_eli5""" ).to("""cuda:0""" )
_UpperCAmelCase = torch.load("""seq2seq_models/eli5_bart_model_blm_2.pth""" )
sas_model.load_state_dict(save_dict["""model"""] )
_UpperCAmelCase = sas_model.eval()
else:
_UpperCAmelCase ,_UpperCAmelCase = make_qa_sas_model(
model_name="""t5-small""" , from_file="""seq2seq_models/eli5_t5_model_1024_4.pth""" , device="""cuda:0""" )
return (qar_tokenizer, qar_model, sas_tokenizer, sas_model)
@st.cache(allow_output_mutation=_A )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
if LOAD_DENSE_INDEX:
_UpperCAmelCase = faiss.StandardGpuResources()
_UpperCAmelCase = datasets.load_dataset(path="""wiki_snippets""" , name="""wiki40b_en_100_0""" )["""train"""]
_UpperCAmelCase = np.memmap(
"""wiki40b_passages_reps_32_l-8_h-768_b-512-512.dat""" , dtype="""float32""" , mode="""r""" , shape=(wikiaab_passages.num_rows, 1_2_8) , )
_UpperCAmelCase = faiss.IndexFlatIP(1_2_8 )
_UpperCAmelCase = faiss.index_cpu_to_gpu(_A , 1 , _A )
wikiaab_gpu_index_flat.add(_A ) # TODO fix for larger GPU
else:
_UpperCAmelCase ,_UpperCAmelCase = (None, None)
_UpperCAmelCase = Elasticsearch([{"""host""": """localhost""", """port""": """9200"""}] )
return (wikiaab_passages, wikiaab_gpu_index_flat, es_client)
@st.cache(allow_output_mutation=_A )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = datasets.load_dataset("""eli5""" , name="""LFQA_reddit""" )
_UpperCAmelCase = elia["""train_eli5"""]
_UpperCAmelCase = np.memmap(
"""eli5_questions_reps.dat""" , dtype="""float32""" , mode="""r""" , shape=(elia_train.num_rows, 1_2_8) )
_UpperCAmelCase = faiss.IndexFlatIP(1_2_8 )
eli5_train_q_index.add(_A )
return (elia_train, eli5_train_q_index)
a : str = load_indexes()
a : Union[str, Any] = load_models()
a : Tuple = load_train_data()
def _UpperCamelCase ( _A , _A=1_0 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = embed_questions_for_retrieval([question] , _A , _A )
_UpperCAmelCase ,_UpperCAmelCase = eli5_train_q_index.search(_A , _A )
_UpperCAmelCase = [elia_train[int(_A )] for i in I[0]]
return nn_examples
def _UpperCamelCase ( _A , _A="wiki40b" , _A="dense" , _A=1_0 ) -> List[Any]:
"""simple docstring"""
if source == "none":
_UpperCAmelCase ,_UpperCAmelCase = (""" <P> """.join(["""""" for _ in range(1_1 )] ).strip(), [])
else:
if method == "dense":
_UpperCAmelCase ,_UpperCAmelCase = query_qa_dense_index(
_A , _A , _A , _A , _A , _A )
else:
_UpperCAmelCase ,_UpperCAmelCase = query_es_index(
_A , _A , index_name="""english_wiki40b_snippets_100w""" , n_results=_A , )
_UpperCAmelCase = [
(res["""article_title"""], res["""section_title"""].strip(), res["""score"""], res["""passage_text"""]) for res in hit_lst
]
_UpperCAmelCase = """question: {} context: {}""".format(_A , _A )
return question_doc, support_list
@st.cache(
hash_funcs={
torch.Tensor: (lambda _A : None),
transformers.models.bart.tokenization_bart.BartTokenizer: (lambda _A : None),
} )
def _UpperCamelCase ( _A , _A , _A , _A=6_4 , _A=2_5_6 , _A=False , _A=2 , _A=0.95 , _A=0.8 ) -> Any:
"""simple docstring"""
with torch.no_grad():
_UpperCAmelCase = qa_sas_generate(
_A , _A , _A , num_answers=1 , num_beams=_A , min_len=_A , max_len=_A , do_sample=_A , temp=_A , top_p=_A , top_k=_A , max_input_length=1_0_2_4 , device="""cuda:0""" , )[0]
return (answer, support_list)
st.title('''Long Form Question Answering with ELI5''')
# Start sidebar
a : Optional[Any] = '''<img src=\'https://huggingface.co/front/assets/huggingface_logo.svg\'>'''
a : Optional[int] = '''
<html>
<head>
<style>
.img-container {
padding-left: 90px;
padding-right: 90px;
padding-top: 50px;
padding-bottom: 50px;
background-color: #f0f3f9;
}
</style>
</head>
<body>
<span class="img-container"> <!-- Inline parent element -->
%s
</span>
</body>
</html>
''' % (
header_html,
)
st.sidebar.markdown(
header_full,
unsafe_allow_html=True,
)
# Long Form QA with ELI5 and Wikipedia
a : Any = '''
This demo presents a model trained to [provide long-form answers to open-domain questions](https://yjernite.github.io/lfqa.html).
First, a document retriever fetches a set of relevant Wikipedia passages given the question from the [Wiki40b](https://research.google/pubs/pub49029/) dataset,
a pre-processed fixed snapshot of Wikipedia.
'''
st.sidebar.markdown(description, unsafe_allow_html=True)
a : Dict = [
'''Answer the question''',
'''View the retrieved document only''',
'''View the most similar ELI5 question and answer''',
'''Show me everything, please!''',
]
a : Optional[Any] = st.sidebar.checkbox('''Demo options''')
if demo_options:
a : List[str] = st.sidebar.selectbox(
'''''',
action_list,
index=3,
)
a : Any = action_list.index(action_st)
a : List[Any] = st.sidebar.selectbox(
'''''',
['''Show full text of passages''', '''Show passage section titles'''],
index=0,
)
a : Optional[Any] = show_type == '''Show full text of passages'''
else:
a : Optional[int] = 3
a : Optional[Any] = True
a : Optional[Any] = st.sidebar.checkbox('''Retrieval options''')
if retrieval_options:
a : Union[str, Any] = '''
### Information retriever options
The **sparse** retriever uses ElasticSearch, while the **dense** retriever uses max-inner-product search between a question and passage embedding
trained using the [ELI5](https://arxiv.org/abs/1907.09190) questions-answer pairs.
The answer is then generated by sequence to sequence model which takes the question and retrieved document as input.
'''
st.sidebar.markdown(retriever_info)
a : Dict = st.sidebar.selectbox('''Which Wikipedia format should the model use?''', ['''wiki40b''', '''none'''])
a : List[Any] = st.sidebar.selectbox('''Which Wikipedia indexer should the model use?''', ['''dense''', '''sparse''', '''mixed'''])
else:
a : int = '''wiki40b'''
a : Dict = '''dense'''
a : Any = '''beam'''
a : str = 2
a : Tuple = 6_4
a : Union[str, Any] = 2_5_6
a : Optional[int] = None
a : Tuple = None
a : str = st.sidebar.checkbox('''Generation options''')
if generate_options:
a : str = '''
### Answer generation options
The sequence-to-sequence model was initialized with [BART](https://huggingface.co/facebook/bart-large)
weights and fine-tuned on the ELI5 QA pairs and retrieved documents. You can use the model for greedy decoding with
**beam** search, or **sample** from the decoder\'s output probabilities.
'''
st.sidebar.markdown(generate_info)
a : Dict = st.sidebar.selectbox('''Would you like to use beam search or sample an answer?''', ['''beam''', '''sampled'''])
a : List[Any] = st.sidebar.slider(
'''Minimum generation length''', min_value=8, max_value=2_5_6, value=6_4, step=8, format=None, key=None
)
a : Optional[int] = st.sidebar.slider(
'''Maximum generation length''', min_value=6_4, max_value=5_1_2, value=2_5_6, step=1_6, format=None, key=None
)
if sampled == "beam":
a : List[Any] = st.sidebar.slider('''Beam size''', min_value=1, max_value=8, value=2, step=None, format=None, key=None)
else:
a : int = st.sidebar.slider(
'''Nucleus sampling p''', min_value=0.1, max_value=1.0, value=0.95, step=0.01, format=None, key=None
)
a : Optional[Any] = st.sidebar.slider(
'''Temperature''', min_value=0.1, max_value=1.0, value=0.7, step=0.01, format=None, key=None
)
a : Optional[Any] = None
# start main text
a : Union[str, Any] = [
'''<MY QUESTION>''',
'''How do people make chocolate?''',
'''Why do we get a fever when we are sick?''',
'''How can different animals perceive different colors?''',
'''What is natural language processing?''',
'''What\'s the best way to treat a sunburn?''',
'''What exactly are vitamins ?''',
'''How does nuclear energy provide electricity?''',
'''What\'s the difference between viruses and bacteria?''',
'''Why are flutes classified as woodwinds when most of them are made out of metal ?''',
'''Why do people like drinking coffee even though it tastes so bad?''',
'''What happens when wine ages? How does it make the wine taste better?''',
'''If an animal is an herbivore, where does it get the protein that it needs to survive if it only eats grass?''',
'''How can we set a date to the beginning or end of an artistic period? Doesn\'t the change happen gradually?''',
'''How does New Zealand have so many large bird predators?''',
]
a : Dict = st.selectbox(
'''What would you like to ask? ---- select <MY QUESTION> to enter a new query''',
questions_list,
index=1,
)
if question_s == "<MY QUESTION>":
a : Union[str, Any] = st.text_input('''Enter your question here:''', '''''')
else:
a : Tuple = question_s
if st.button('''Show me!'''):
if action in [0, 1, 3]:
if index_type == "mixed":
a : List[Any] = make_support(question, source=wiki_source, method='''dense''', n_results=1_0)
a : Any = make_support(question, source=wiki_source, method='''sparse''', n_results=1_0)
a : Any = []
for res_d, res_s in zip(support_list_dense, support_list_sparse):
if tuple(res_d) not in support_list:
support_list += [tuple(res_d)]
if tuple(res_s) not in support_list:
support_list += [tuple(res_s)]
a : List[Any] = support_list[:1_0]
a : List[Any] = '''<P> ''' + ''' <P> '''.join([res[-1] for res in support_list])
else:
a : Any = make_support(question, source=wiki_source, method=index_type, n_results=1_0)
if action in [0, 3]:
a : Any = answer_question(
question_doc,
sas_model,
sas_tokenizer,
min_len=min_len,
max_len=int(max_len),
sampling=(sampled == '''sampled'''),
n_beams=n_beams,
top_p=top_p,
temp=temp,
)
st.markdown('''### The model generated answer is:''')
st.write(answer)
if action in [0, 1, 3] and wiki_source != "none":
st.markdown('''--- \n ### The model is drawing information from the following Wikipedia passages:''')
for i, res in enumerate(support_list):
a : str = '''https://en.wikipedia.org/wiki/{}'''.format(res[0].replace(''' ''', '''_'''))
a : Tuple = res[1].strip()
if sec_titles == "":
a : List[Any] = '''[{}]({})'''.format(res[0], wiki_url)
else:
a : Union[str, Any] = sec_titles.split(''' & ''')
a : Optional[Any] = ''' & '''.join(
['''[{}]({}#{})'''.format(sec.strip(), wiki_url, sec.strip().replace(''' ''', '''_''')) for sec in sec_list]
)
st.markdown(
'''{0:02d} - **Article**: {1:<18} <br> _Section_: {2}'''.format(i + 1, res[0], sections),
unsafe_allow_html=True,
)
if show_passages:
st.write(
'''> <span style="font-family:arial; font-size:10pt;">''' + res[-1] + '''</span>''', unsafe_allow_html=True
)
if action in [2, 3]:
a : int = find_nearest_training(question)
a : int = nn_train_list[0]
st.markdown(
'''--- \n ### The most similar question in the ELI5 training set was: \n\n {}'''.format(train_exple['''title'''])
)
a : Union[str, Any] = [
'''{}. {}'''.format(i + 1, ''' \n'''.join([line.strip() for line in ans.split('''\n''') if line.strip() != '''''']))
for i, (ans, sc) in enumerate(zip(train_exple['''answers''']['''text'''], train_exple['''answers''']['''score''']))
if i == 0 or sc > 2
]
st.markdown('''##### Its answers were: \n\n {}'''.format('''\n'''.join(answers_st)))
a : Any = '''
---
**Disclaimer**
*The intent of this app is to provide some (hopefully entertaining) insights into the behavior of a current LFQA system.
Evaluating biases of such a model and ensuring factual generations are still very much open research problems.
Therefore, until some significant progress is achieved, we caution against using the generated answers for practical purposes.*
'''
st.sidebar.markdown(disclaimer, unsafe_allow_html=True) | 700 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
main() | 19 | 0 |
"""simple docstring"""
import collections
import inspect
import unittest
from transformers import FocalNetConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_backbone_common import BackboneTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
FocalNetBackbone,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetModel,
)
from transformers.models.focalnet.modeling_focalnet import FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class a_ :
def __init__( self : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any=13 , __UpperCamelCase : str=32 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[Any]=3 , __UpperCamelCase : Optional[int]=16 , __UpperCamelCase : Union[str, Any]=[32, 64, 1_28] , __UpperCamelCase : str=[1, 2, 1] , __UpperCamelCase : Union[str, Any]=[2, 2, 4] , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Dict=2.0 , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[int]=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : str=False , __UpperCamelCase : str=True , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : Optional[int]=1e-5 , __UpperCamelCase : int=True , __UpperCamelCase : List[str]=None , __UpperCamelCase : Any=True , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : List[str]=8 , __UpperCamelCase : Tuple=["stage1", "stage2"] , __UpperCamelCase : List[str]=[1, 2] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = hidden_sizes
_UpperCAmelCase = depths
_UpperCAmelCase = num_heads
_UpperCAmelCase = window_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = use_absolute_embeddings
_UpperCAmelCase = patch_norm
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = is_training
_UpperCAmelCase = scope
_UpperCAmelCase = use_labels
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = encoder_stride
_UpperCAmelCase = out_features
_UpperCAmelCase = out_indices
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
return FocalNetConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , embed_dim=self.embed_dim , hidden_sizes=self.hidden_sizes , depths=self.depths , num_heads=self.num_heads , window_size=self.window_size , mlp_ratio=self.mlp_ratio , qkv_bias=self.qkv_bias , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , drop_path_rate=self.drop_path_rate , hidden_act=self.hidden_act , use_absolute_embeddings=self.use_absolute_embeddings , path_norm=self.patch_norm , layer_norm_eps=self.layer_norm_eps , initializer_range=self.initializer_range , encoder_stride=self.encoder_stride , out_features=self.out_features , out_indices=self.out_indices , )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = FocalNetModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = ((config.image_size // config.patch_size) ** 2) // (4 ** (len(config.depths ) - 1))
_UpperCAmelCase = int(config.embed_dim * 2 ** (len(config.depths ) - 1) )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, expected_seq_len, expected_dim) )
def _snake_case ( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase = FocalNetBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , len(config.out_features ) )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size, 8, 8] )
# verify channels
self.parent.assertEqual(len(model.channels ) , len(config.out_features ) )
self.parent.assertListEqual(model.channels , config.hidden_sizes[:-1] )
# verify backbone works with out_features=None
_UpperCAmelCase = None
_UpperCAmelCase = FocalNetBackbone(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
# verify feature maps
self.parent.assertEqual(len(result.feature_maps ) , 1 )
self.parent.assertListEqual(list(result.feature_maps[0].shape ) , [self.batch_size, self.image_size * 2, 4, 4] )
# verify channels
self.parent.assertEqual(len(model.channels ) , 1 )
self.parent.assertListEqual(model.channels , [config.hidden_sizes[-1]] )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = FocalNetForMaskedImageModeling(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(
result.reconstruction.shape , (self.batch_size, self.num_channels, self.image_size, self.image_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FocalNetForMaskedImageModeling(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.reconstruction.shape , (self.batch_size, 1, self.image_size, self.image_size) )
def _snake_case ( self : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = FocalNetForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FocalNetForImageClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : str = (
(
FocalNetModel,
FocalNetForImageClassification,
FocalNetForMaskedImageModeling,
FocalNetBackbone,
)
if is_torch_available()
else ()
)
a : int = (
{'feature-extraction': FocalNetModel, 'image-classification': FocalNetForImageClassification}
if is_torch_available()
else {}
)
a : Optional[Any] = False
a : Dict = False
a : List[Any] = False
a : int = False
a : Optional[Any] = False
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = FocalNetModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , embed_dim=37 , has_text_modality=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
self.create_and_test_config_common_properties()
self.config_tester.create_and_test_config_to_json_string()
self.config_tester.create_and_test_config_to_json_file()
self.config_tester.create_and_test_config_from_and_save_pretrained()
self.config_tester.create_and_test_config_with_num_labels()
self.config_tester.check_config_can_be_init_without_params()
self.config_tester.check_config_arguments_init()
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
return
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_backbone(*__UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_image_modeling(*__UpperCamelCase )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@unittest.skip(reason="""FocalNet does not use inputs_embeds""" )
def _snake_case ( self : List[Any] ) ->Any:
'''simple docstring'''
pass
@unittest.skip(reason="""FocalNet does not use feedforward chunking""" )
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
pass
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , len(self.model_tester.depths ) + 1 )
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
# FocalNet has a different seq_length
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
_UpperCAmelCase = outputs.reshaped_hidden_states
self.assertEqual(len(__UpperCamelCase ) , __UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = reshaped_hidden_states[0].shape
_UpperCAmelCase = (
reshaped_hidden_states[0].view(__UpperCamelCase , __UpperCamelCase , height * width ).permute(0 , 2 , 1 )
)
self.assertListEqual(
list(reshaped_hidden_states.shape[-2:] ) , [num_patches, self.model_tester.embed_dim] , )
def _snake_case ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = 3
_UpperCAmelCase = (
self.model_tester.image_size
if isinstance(self.model_tester.image_size , collections.abc.Iterable )
else (self.model_tester.image_size, self.model_tester.image_size)
)
_UpperCAmelCase = (
config.patch_size
if isinstance(config.patch_size , collections.abc.Iterable )
else (config.patch_size, config.patch_size)
)
_UpperCAmelCase = image_size[0] + patch_size[0] - (image_size[0] % patch_size[0])
_UpperCAmelCase = image_size[1] + patch_size[1] - (image_size[1] % patch_size[1])
for model_class in self.all_model_classes[:-1]:
_UpperCAmelCase = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
self.check_hidden_states_output(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , (padded_height, padded_width) )
@slow
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
for model_name in FOCALNET_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = FocalNetModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
for name, param in model.named_parameters():
if "embeddings" not in name and param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@require_vision
@require_torch
class a_ ( unittest.TestCase ):
@cached_property
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
return AutoImageProcessor.from_pretrained("""microsoft/focalnet-tiny""" ) if is_vision_available() else None
@slow
def _snake_case ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = FocalNetForImageClassification.from_pretrained("""microsoft/focalnet-tiny""" ).to(__UpperCamelCase )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
# verify the logits
_UpperCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(outputs.logits.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor([0.2_1_6_6, -0.4_3_6_8, 0.2_1_9_1] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
self.assertTrue(outputs.logits.argmax(dim=-1 ).item() , 2_81 )
@require_torch
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (FocalNetBackbone,) if is_torch_available() else ()
a : Tuple = FocalNetConfig
a : Optional[Any] = False
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FocalNetModelTester(self ) | 701 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
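# Utilities to inspect a GitHub Actions workflow run of huggingface/transformers:
# fetch the run's job links and artifacts, download the artifact zips, and extract
# the test failures they contain.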
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
    _UpperCAmelCase = requests.get(url , headers=headers ).json()
_UpperCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
            _UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=headers ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
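# Worked example of the pagination arithmetic used above (not in the original):
# the first request returns up to 100 jobs, so with total_count == 250 there are
# math.ceil((250 - 100) / 100) == 2 follow-up pages to fetch (&page=2, &page=3).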
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
    _UpperCAmelCase = requests.get(url , headers=headers ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
            _UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=headers ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
    _UpperCAmelCase = requests.get(artifact_url , headers=headers , allow_redirects=False )
    _UpperCAmelCase = result.headers["""Location"""]
    _UpperCAmelCase = requests.get(download_url , allow_redirects=True )
    _UpperCAmelCase = os.path.join(output_dir , F"""{artifact_name}.zip""" )
    with open(file_path , """wb""" ) as fp:
fp.write(response.content )
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
            if not os.path.isdir(filename ):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename ) as f:
for line in f:
_UpperCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(""": """ )]
_UpperCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase = line[len("""FAILED """ ) :]
                            failed_tests.append(test )
elif filename == "job_name.txt":
_UpperCAmelCase = line
    if len(errors ) != len(failed_tests ):
        raise ValueError(
            F"""`errors` and `failed_tests` should have the same number of elements. Got {len(errors )} for `errors` """
            F"""and {len(failed_tests )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
            """ problem.""" )
_UpperCAmelCase = None
if job_name and job_links:
        _UpperCAmelCase = job_links.get(job_name )
# A list with elements of the form (line of error, error, failed test)
    _UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(errors , failed_tests )]
return result
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
    _UpperCAmelCase = [os.path.join(_A , p ) for p in os.listdir(_A ) if p.endswith(""".zip""" )]
for p in paths:
        errors.extend(get_errors_from_single_artifact(p , job_links=job_links ) )
return errors
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    _UpperCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_UpperCAmelCase = test.split("""/""" )[2]
else:
_UpperCAmelCase = None
    return model
def _UpperCamelCase ( _A , _A=None ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase = [x for x in logs if x[2] is not None]
_UpperCAmelCase = {x[2] for x in logs}
_UpperCAmelCase = {}
for test in tests:
_UpperCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase = {"""count""": n_errors, """errors""": error_counts}
    _UpperCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """| no. | error | status |"""
_UpperCAmelCase = """|-:|:-|:-|"""
_UpperCAmelCase = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase = reduced_by_error[error]["""count"""]
_UpperCAmelCase = F"""| {count} | {error[:1_0_0]} | |"""
        lines.append(line )
    return "\n".join(lines )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """| model | no. of errors | major error | count |"""
_UpperCAmelCase = """|-:|-:|-:|-:|"""
_UpperCAmelCase = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase = reduced_by_model[model]["""count"""]
_UpperCAmelCase ,_UpperCAmelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
_UpperCAmelCase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
        lines.append(line )
    return "\n".join(lines )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
a : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a : Tuple = get_job_links(args.workflow_run_id, token=args.token)
a : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a : List[Any] = k.find(''' / ''')
a : Tuple = k[index + len(''' / ''') :]
a : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a : Optional[int] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a : int = reduce_by_error(errors)
a : str = reduce_by_model(errors)
a : int = make_github_table(reduced_by_error)
a : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
        fp.write(sa)
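# Example invocation (added; the script name and token value are hypothetical --
# the token needs actions:read permission):
# python get_ci_error_statistics.py --workflow_run_id 1234567890 \
#     --output_dir ci_errors --token ghp_xxx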
"""simple docstring"""
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
a : Tuple = logging.get_logger(__name__)
class a_ ( _UpperCAmelCase ):
a : Tuple = ['audio_values', 'audio_mask']
def __init__( self : Optional[Any] , __UpperCamelCase : Optional[Any]=20_48 , __UpperCamelCase : Any=1 , __UpperCamelCase : Any=[16, 16] , __UpperCamelCase : Optional[int]=1_28 , __UpperCamelCase : Union[str, Any]=4_41_00 , __UpperCamelCase : List[str]=86 , __UpperCamelCase : int=20_48 , __UpperCamelCase : Optional[Any]=0.0 , **__UpperCamelCase : Dict , ) ->Dict:
'''simple docstring'''
super().__init__(
            feature_size=feature_size , sampling_rate=sampling_rate , padding_value=padding_value , **kwargs , )
_UpperCAmelCase = spectrogram_length
_UpperCAmelCase = num_channels
_UpperCAmelCase = patch_size
_UpperCAmelCase = feature_size // self.patch_size[1]
_UpperCAmelCase = n_fft
_UpperCAmelCase = sampling_rate // hop_length_to_sampling_rate
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = padding_value
_UpperCAmelCase = mel_filter_bank(
            num_frequency_bins=1 + n_fft // 2 , num_mel_filters=feature_size , min_frequency=0.0 , max_frequency=2_20_50.0 , sampling_rate=sampling_rate , norm="""slaney""" , mel_scale="""slaney""" , ).T
def _snake_case ( self : Dict , __UpperCamelCase : np.array ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase = spectrogram(
            waveform , window_function(self.n_fft , """hann""" ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel="""dB""" , db_range=80.0 , )
_UpperCAmelCase = log_spec[:, :-1]
_UpperCAmelCase = log_spec - 20.0
_UpperCAmelCase = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
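    # Note (added): with db_range=80.0 the log spectrogram lies within 80 dB of
    # its peak; subtracting 20 and applying clip(x / 40, -2, 0) + 1 rescales
    # every value into [-1, 1] before patching.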
def __call__( self : Optional[Any] , __UpperCamelCase : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Optional[bool] = True , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : bool = False , **__UpperCamelCase : str , ) ->BatchFeature:
'''simple docstring'''
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
"""This feature extractor is set to support sampling rate"""
f""" of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled"""
f""" with {self.sampling_rate} and not {sampling_rate}.""" )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
        _UpperCAmelCase = isinstance(raw_speech , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f"""Only mono-channel audio is supported for input to {self}""" )
_UpperCAmelCase = is_batched_numpy or (
isinstance(__UpperCamelCase , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
_UpperCAmelCase = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
        elif not is_batched and not isinstance(raw_speech , np.ndarray ):
            _UpperCAmelCase = np.asarray(raw_speech , dtype=np.floataa )
        elif isinstance(raw_speech , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
_UpperCAmelCase = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
_UpperCAmelCase = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
_UpperCAmelCase = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
        if isinstance(audio_features[0] , list ):
            _UpperCAmelCase = [np.asarray(feature , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
_UpperCAmelCase = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
_UpperCAmelCase = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
            _UpperCAmelCase = np.array(audio_mask ).astype(np.floataa )
# convert into correct format for padding
_UpperCAmelCase = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
        _UpperCAmelCase = np.ones([len(audio_features ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
_UpperCAmelCase = padded_audio_features * self.padding_value
        for i in range(len(audio_features ) ):
_UpperCAmelCase = audio_features[i]
_UpperCAmelCase = feature
# return as BatchFeature
if return_attention_mask:
_UpperCAmelCase = {"""audio_values""": padded_audio_features, """audio_mask""": audio_mask}
else:
_UpperCAmelCase = {"""audio_values""": padded_audio_features}
        _UpperCAmelCase = BatchFeature(data=data , tensor_type=return_tensors )
        return encoded_inputs
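# Standalone sketch (added; not part of the file above): the patch-count
# arithmetic behind the audio mask -- time patches, rounded up, times the
# number of patches along the mel-frequency axis.
from math import ceil
num_frames, patch_h, freq_len = 97, 16, 8  # hypothetical values
num_patches = ceil(num_frames / patch_h) * freq_len  # ceil(97/16) * 8 == 56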
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
            _UpperCAmelCase = self.image_processor(images , *args , **kwargs )
if text is not None:
            _UpperCAmelCase = self.tokenizer(text , **kwargs )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def _snake_case ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=None ) ->List[str]:
'''simple docstring'''
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(r"""<s_(.*?)>""" , __UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(rf"""</s_{key}>""" , __UpperCamelCase , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
                _UpperCAmelCase = tokens.replace(start_token , """""" )
else:
_UpperCAmelCase = end_token.group()
                _UpperCAmelCase = re.escape(start_token )
                _UpperCAmelCase = re.escape(end_token )
                _UpperCAmelCase = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , tokens , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
                        _UpperCAmelCase = self.tokenajson(content , is_inner_value=True , added_vocab=added_vocab )
if value:
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(r"""<sep/>""" ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
                        output[key].append(leaf )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
            _UpperCAmelCase = tokens[tokens.find(end_token ) + len(end_token ) :].strip()
            if tokens[:6] == r"<sep/>":  # non-leaf nodes
                return [output] + self.tokenajson(tokens[6:] , is_inner_value=True , added_vocab=added_vocab )
        if len(output ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
        return self.image_processor
"""simple docstring"""
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = filter(lambda _A : p.requires_grad , model.parameters() )
_UpperCAmelCase = sum([np.prod(p.size() ) for p in model_parameters] )
return params
a : Union[str, Any] = logging.getLogger(__name__)
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
if metric == "rouge2":
_UpperCAmelCase = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
_UpperCAmelCase = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
_UpperCAmelCase = """{val_avg_em:.4f}-{step_count}"""
elif metric == "loss":
_UpperCAmelCase = """{val_avg_loss:.4f}-{step_count}"""
else:
raise NotImplementedError(
F"""seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this"""
""" function.""" )
_UpperCAmelCase = ModelCheckpoint(
dirpath=_A , filename=_A , monitor=F"""val_{metric}""" , mode="""max""" , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def _UpperCamelCase ( _A , _A ) -> Tuple:
"""simple docstring"""
return EarlyStopping(
monitor=F"""val_{metric}""" , mode="""min""" if """loss""" in metric else """max""" , patience=_A , verbose=_A , )
class a_ ( pl.Callback ):
def _snake_case ( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = {f"""lr_group_{i}""": param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
@rank_zero_only
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pl.Trainer , __UpperCamelCase : pl.LightningModule , __UpperCamelCase : str , __UpperCamelCase : str=True ) ->None:
'''simple docstring'''
logger.info(f"""***** {type_path} results at step {trainer.global_step:05d} *****""" )
_UpperCAmelCase = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
_UpperCAmelCase = Path(pl_module.hparams.output_dir )
if type_path == "test":
_UpperCAmelCase = od / """test_results.txt"""
_UpperCAmelCase = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
_UpperCAmelCase = od / f"""{type_path}_results/{trainer.global_step:05d}.txt"""
_UpperCAmelCase = od / f"""{type_path}_generations/{trainer.global_step:05d}.txt"""
        results_file.parent.mkdir(exist_ok=True )
        generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , """a+""" ) as writer:
            for key in sorted(metrics ):
if key in ["log", "progress_bar", "preds"]:
continue
_UpperCAmelCase = metrics[key]
                if isinstance(val , torch.Tensor ):
_UpperCAmelCase = val.item()
_UpperCAmelCase = f"""{key}: {val:.6f}\n"""
                writer.write(msg )
if not save_generations:
return
if "preds" in metrics:
_UpperCAmelCase = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(__UpperCamelCase )
@rank_zero_only
def _snake_case ( self : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ) ->List[str]:
'''simple docstring'''
try:
_UpperCAmelCase = pl_module.model.model.num_parameters()
except AttributeError:
_UpperCAmelCase = pl_module.model.num_parameters()
        _UpperCAmelCase = count_trainable_parameters(pl_module )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def _snake_case ( self : Optional[int] , __UpperCamelCase : pl.Trainer , __UpperCamelCase : pl.LightningModule ) ->Optional[Any]:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , """test""" )
@rank_zero_only
def _snake_case ( self : List[Any] , __UpperCamelCase : pl.Trainer , __UpperCamelCase : Any ) ->str:
'''simple docstring'''
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
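# Minimal wiring sketch (added; names are assumptions -- the two factories above
# are obfuscated here as `_UpperCamelCase`, originally get_checkpoint_callback
# and get_early_stopping_callback):
#   checkpoint = get_checkpoint_callback(output_dir, metric="rouge2")
#   early_stop = get_early_stopping_callback(metric="rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[checkpoint, early_stop])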
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( _A , _A , _A ) -> float:
"""simple docstring"""
_UpperCAmelCase = x
_UpperCAmelCase = y
    for step in range(max_step ):  # noqa: B007
_UpperCAmelCase = a * a - b * b + x
_UpperCAmelCase = 2 * a * b + y
_UpperCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
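# Note (added): the value above is the normalized escape time in [0, 1]; a point
# that never diverges within max_step iterations returns exactly 1, while points
# that blow up immediately return values near 0.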
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
        return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(distance , 1 , 1 ) )
def _UpperCamelCase ( _A = 8_0_0 , _A = 6_0_0 , _A = -0.6 , _A = 0 , _A = 3.2 , _A = 5_0 , _A = True , ) -> Image.Image:
"""simple docstring"""
_UpperCAmelCase = Image.new("""RGB""" , (image_width, image_height) )
_UpperCAmelCase = img.load()
# loop through the image-coordinates
    for image_x in range(image_width ):
        for image_y in range(image_height ):
# determine the figure-coordinates based on the image-coordinates
_UpperCAmelCase = figure_width / image_width * image_height
_UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
_UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
            _UpperCAmelCase = get_distance(figure_x , figure_y , max_step )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
                _UpperCAmelCase = get_color_coded_rgb(distance )
else:
                _UpperCAmelCase = get_black_and_white_rgb(distance )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
    img.show()
"""simple docstring"""
from typing import Dict, List
from nltk.translate import gleu_score
import datasets
from datasets import MetricInfo
a : List[Any] = '''\
@misc{wu2016googles,
title={Google\'s Neural Machine Translation System: Bridging the Gap between Human and Machine Translation},
author={Yonghui Wu and Mike Schuster and Zhifeng Chen and Quoc V. Le and Mohammad Norouzi and Wolfgang Macherey
and Maxim Krikun and Yuan Cao and Qin Gao and Klaus Macherey and Jeff Klingner and Apurva Shah and Melvin
Johnson and Xiaobing Liu and Łukasz Kaiser and Stephan Gouws and Yoshikiyo Kato and Taku Kudo and Hideto
Kazawa and Keith Stevens and George Kurian and Nishant Patil and Wei Wang and Cliff Young and
Jason Smith and Jason Riesa and Alex Rudnick and Oriol Vinyals and Greg Corrado and Macduff Hughes
and Jeffrey Dean},
year={2016},
eprint={1609.08144},
archivePrefix={arXiv},
primaryClass={cs.CL}
}
'''
a : Any = '''\
The BLEU score has some undesirable properties when used for single
sentences, as it was designed to be a corpus measure. We therefore
use a slightly different score for our RL experiments which we call
the \'GLEU score\'. For the GLEU score, we record all sub-sequences of
1, 2, 3 or 4 tokens in output and target sequence (n-grams). We then
compute a recall, which is the ratio of the number of matching n-grams
to the number of total n-grams in the target (ground truth) sequence,
and a precision, which is the ratio of the number of matching n-grams
to the number of total n-grams in the generated output sequence. Then
GLEU score is simply the minimum of recall and precision. This GLEU
score\'s range is always between 0 (no matches) and 1 (all match) and
it is symmetrical when switching output and target. According to
our experiments, GLEU score correlates quite well with the BLEU
metric on a corpus level but does not have its drawbacks for our per
sentence reward objective.
'''
a : str = '''\
Computes corpus-level Google BLEU (GLEU) score of translated segments against one or more references.
Instead of averaging the sentence level GLEU scores (i.e. macro-average precision), Wu et al. (2016) sum up the matching
tokens and the max of hypothesis and reference tokens for each sentence, then compute using the aggregate values.
Args:
predictions (list of str): list of translations to score.
Each translation should be tokenized into a list of tokens.
references (list of list of str): list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
min_len (int): The minimum order of n-gram this function should extract. Defaults to 1.
max_len (int): The maximum order of n-gram this function should extract. Defaults to 4.
Returns:
\'google_bleu\': google_bleu score
Examples:
Example 1:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.44
Example 2:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references)
>>> print(round(results["google_bleu"], 2))
0.61
Example 3:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses, references=list_of_references, min_len=2)
>>> print(round(results["google_bleu"], 2))
0.53
Example 4:
>>> hyp1 = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'which\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'always\',
... \'disobeys\', \'the\', \'commands\', \'of\', \'the\', \'cat\']
>>> ref1a = [\'It\', \'is\', \'the\', \'guiding\', \'principle\', \'which\',
... \'guarantees\', \'the\', \'rubber\', \'duck\', \'forces\', \'never\',
... \'being\', \'under\', \'the\', \'command\', \'of\', \'the\', \'cat\']
>>> ref1b = [\'It\', \'is\', \'a\', \'guide\', \'to\', \'action\', \'that\',
... \'ensures\', \'that\', \'the\', \'rubber\', \'duck\', \'will\', \'never\',
... \'heed\', \'the\', \'cat\', \'commands\']
>>> ref1c = [\'It\', \'is\', \'the\', \'practical\', \'guide\', \'for\', \'the\',
... \'rubber\', \'duck\', \'army\', \'never\', \'to\', \'heed\', \'the\', \'directions\',
... \'of\', \'the\', \'cat\']
>>> hyp2 = [\'he\', \'read\', \'the\', \'book\', \'because\', \'he\', \'was\',
... \'interested\', \'in\', \'world\', \'history\']
>>> ref2a = [\'he\', \'was\', \'interested\', \'in\', \'world\', \'history\',
... \'because\', \'he\', \'read\', \'the\', \'book\']
>>> list_of_references = [[ref1a, ref1b, ref1c], [ref2a]]
>>> hypotheses = [hyp1, hyp2]
>>> google_bleu = datasets.load_metric("google_bleu")
>>> results = google_bleu.compute(predictions=hypotheses,references=list_of_references, min_len=2, max_len=6)
>>> print(round(results["google_bleu"], 2))
0.4
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Any ) ->MetricInfo:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ),
"""references""": datasets.Sequence(
datasets.Sequence(datasets.Value("""string""" , id="""token""" ) , id="""sequence""" ) , id="""references""" ),
} ) , )
def _snake_case ( self : Dict , __UpperCamelCase : List[List[List[str]]] , __UpperCamelCase : List[List[str]] , __UpperCamelCase : int = 1 , __UpperCamelCase : int = 4 , ) ->Dict[str, float]:
'''simple docstring'''
return {
"google_bleu": gleu_score.corpus_gleu(
                list_of_references=references , hypotheses=predictions , min_len=min_len , max_len=max_len )
        }
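# Quick standalone check (added; same nltk API the metric wraps):
from nltk.translate import gleu_score
score = gleu_score.corpus_gleu(
    list_of_references=[[["the", "cat", "sat"]]],
    hypotheses=[["the", "cat", "sat"]],
)
assert score == 1.0  # identical hypothesis and reference give a perfect score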
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
def __init__( self : List[str] , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 88 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 32 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "geglu" , __UpperCamelCase : Optional[int] = None , ) ->Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__UpperCamelCase , attention_head_dim=__UpperCamelCase , in_channels=__UpperCamelCase , num_layers=__UpperCamelCase , dropout=__UpperCamelCase , norm_num_groups=__UpperCamelCase , cross_attention_dim=__UpperCamelCase , attention_bias=__UpperCamelCase , sample_size=__UpperCamelCase , num_vector_embeds=__UpperCamelCase , activation_fn=__UpperCamelCase , num_embeds_ada_norm=__UpperCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def _snake_case ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : bool = True , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
            _UpperCAmelCase = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
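# Note (added): because each transformer's output has the input subtracted
# before blending, the final result above is a residual mix,
#   output = mix_ratio * h_0 + (1 - mix_ratio) * h_1 + input_states.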
"""simple docstring"""
from __future__ import annotations
from collections import Counter
from random import random
class a_ :
def __init__( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = {}
def _snake_case ( self : str , __UpperCamelCase : str ) ->None:
'''simple docstring'''
_UpperCAmelCase = {}
def _snake_case ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : float ) ->None:
'''simple docstring'''
if nodea not in self.connections:
self.add_node(__UpperCamelCase )
if nodea not in self.connections:
self.add_node(__UpperCamelCase )
_UpperCAmelCase = probability
def _snake_case ( self : Dict ) ->list[str]:
'''simple docstring'''
return list(self.connections )
def _snake_case ( self : Tuple , __UpperCamelCase : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = 0
_UpperCAmelCase = random()
for dest in self.connections[node]:
current_probability += self.connections[node][dest]
if current_probability > random_value:
return dest
return ""
def _UpperCamelCase ( _A , _A , _A ) -> dict[str, int]:
"""simple docstring"""
_UpperCAmelCase = MarkovChainGraphUndirectedUnweighted()
for nodea, nodea, probability in transitions:
graph.add_transition_probability(_A , _A , _A )
_UpperCAmelCase = Counter(graph.get_nodes() )
_UpperCAmelCase = start
    for _ in range(steps ):
        _UpperCAmelCase = graph.transition(node )
visited[node] += 1
return visited
if __name__ == "__main__":
import doctest
    doctest.testmod()
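# Standalone sketch (added): the same roulette-wheel selection `transition`
# performs above, over a plain {state: probability} dict.
from random import random
def pick_next(dist: dict) -> str:
    threshold, cumulative = random(), 0.0
    for state, prob in dist.items():
        cumulative += prob
        if cumulative > threshold:
            return state
    return ""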
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
    _UpperCAmelCase = LxmertConfig.from_json_file(config_file )
    print(F"""Building PyTorch model from configuration: {config}""" )
    _UpperCAmelCase = LxmertForPreTraining(config )
    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model , config , tf_checkpoint_path )
    # Save pytorch-model
    print(F"""Save PyTorch model to {pytorch_dump_path}""" )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
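# Example invocation (added; paths are hypothetical):
# python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#     --tf_checkpoint_path ./model.ckpt \
#     --config_file ./lxmert_config.json \
#     --pytorch_dump_path ./pytorch_model.bin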
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
def __init__( self : List[str] , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 88 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 32 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "geglu" , __UpperCamelCase : Optional[int] = None , ) ->Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__UpperCamelCase , attention_head_dim=__UpperCamelCase , in_channels=__UpperCamelCase , num_layers=__UpperCamelCase , dropout=__UpperCamelCase , norm_num_groups=__UpperCamelCase , cross_attention_dim=__UpperCamelCase , attention_bias=__UpperCamelCase , sample_size=__UpperCamelCase , num_vector_embeds=__UpperCamelCase , activation_fn=__UpperCamelCase , num_embeds_ada_norm=__UpperCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def _snake_case ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : bool = True , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
            _UpperCAmelCase = self.transformers[transformer_index](
                input_states , encoder_hidden_states=condition_state , timestep=timestep , cross_attention_kwargs=cross_attention_kwargs , return_dict=False , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
        return TransformeraDModelOutput(sample=output_states )
"""simple docstring"""
import argparse
import os
import re
import packaging.version
a : str = '''examples/'''
a : List[str] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a : Tuple = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
a : List[str] = '''README.md'''
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase ,_UpperCAmelCase = REPLACE_PATTERNS[pattern]
_UpperCAmelCase = replace.replace("""VERSION""" , _A )
_UpperCAmelCase = re_pattern.sub(_A , _A )
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_A )
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern="""examples""" )
def _UpperCamelCase ( _A , _A=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
_UpperCAmelCase = """🤗 Transformers currently provides the following architectures"""
_UpperCAmelCase = """1. Want to contribute a new model?"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.readlines()
# Find the start of the list.
_UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_UpperCAmelCase = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_A )
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
_UpperCAmelCase = f.read()
    _UpperCAmelCase = REPLACE_PATTERNS["""init"""][0].search(code ).groups()[0]
    return packaging.version.parse(default_version )
def _UpperCamelCase ( _A=False ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_UpperCAmelCase = default_version.base_version
elif patch:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_UpperCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" )
    if len(version ) == 0:
_UpperCAmelCase = default_version
print(F"""Updating version to {version}.""" )
    global_version_update(version , patch=patch )
def _UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
_UpperCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
_UpperCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" )
    if len(version ) == 0:
_UpperCAmelCase = dev_version
print(F"""Updating version to {version}.""" )
    global_version_update(version )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
        post_release_work()
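# Worked example of the bump logic above (added; version numbers illustrative):
# from a dev tree at __version__ == "0.18.0.dev0" the default release is
# "0.18.0"; a follow-up run with --post_release proposes "0.19.0.dev0"; and
# --patch on a released "0.18.0" proposes "0.18.1".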
"""simple docstring"""
import math
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while num > 0:
_UpperCAmelCase = num % 8
        _UpperCAmelCase = octal + (remainder * math.floor(math.pow(1_0 , counter ) ))
counter += 1
_UpperCAmelCase = math.floor(num / 8 ) # basically /= 8 without remainder if any
# This formatting removes trailing '.0' from `octal`.
return F"""0o{int(_A )}"""
def _UpperCamelCase ( ) -> None:
"""simple docstring"""
print("""\n2 in octal is:""" )
print(decimal_to_octal(2 ) ) # = 2
print("""\n8 in octal is:""" )
print(decimal_to_octal(8 ) ) # = 10
print("""\n65 in octal is:""" )
print(decimal_to_octal(6_5 ) ) # = 101
print("""\n216 in octal is:""" )
print(decimal_to_octal(2_1_6 ) ) # = 330
print("""\n512 in octal is:""" )
print(decimal_to_octal(5_1_2 ) ) # = 1000
print("""\n""" )
if __name__ == "__main__":
    main()
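# Cross-check sketch (added): Python's built-in octal formatter agrees with the
# digit-by-digit construction above.
assert oct(65) == "0o101"
assert oct(216) == "0o330"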
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A ) -> None:
"""simple docstring"""
create_state_space_tree(_A , [] , 0 , [0 for i in range(len(_A ) )] )
def _UpperCamelCase ( _A , _A , _A , _A , ) -> None:
"""simple docstring"""
    if index == len(sequence ):
        print(current_sequence )
        return
    for i in range(len(sequence ) ):
        if not index_used[i]:
            current_sequence.append(sequence[i] )
            _UpperCAmelCase = True
            create_state_space_tree(sequence , current_sequence , index + 1 , index_used )
current_sequence.pop()
_UpperCAmelCase = False
a : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
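# Cross-check sketch (added): the backtracking above enumerates all n!
# orderings, the same set itertools produces.
from itertools import permutations
assert len(list(permutations([3, 1, 2, 4]))) == 24  # 4! = 24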
"""simple docstring"""
import json
import os
import shutil
import tempfile
import unittest
import numpy as np
import pytest
from transformers import BertTokenizer, BertTokenizerFast
from transformers.models.bert.tokenization_bert import VOCAB_FILES_NAMES
from transformers.testing_utils import require_vision
from transformers.utils import FEATURE_EXTRACTOR_NAME, is_vision_available
if is_vision_available():
from PIL import Image
from transformers import ChineseCLIPImageProcessor, ChineseCLIPProcessor
@require_vision
class a_ ( unittest.TestCase ):
def _snake_case ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = tempfile.mkdtemp()
_UpperCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""的""",
"""价""",
"""格""",
"""是""",
"""15""",
"""便""",
"""alex""",
"""##andra""",
""",""",
"""。""",
"""-""",
"""t""",
"""shirt""",
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_UpperCAmelCase = {
"""do_resize""": True,
"""size""": {"""height""": 2_24, """width""": 2_24},
"""do_center_crop""": True,
"""crop_size""": {"""height""": 18, """width""": 18},
"""do_normalize""": True,
"""image_mean""": [0.4_8_1_4_5_4_6_6, 0.4_5_7_8_2_7_5, 0.4_0_8_2_1_0_7_3],
"""image_std""": [0.2_6_8_6_2_9_5_4, 0.2_6_1_3_0_2_5_8, 0.2_7_5_7_7_7_1_1],
"""do_convert_rgb""": True,
}
        _UpperCAmelCase = os.path.join(self.tmpdirname , FEATURE_EXTRACTOR_NAME )
        with open(self.image_processor_file , """w""" , encoding="""utf-8""" ) as fp:
            json.dump(image_processor_map , fp )
def _snake_case ( self : Tuple , **__UpperCamelCase : Any ) ->List[Any]:
'''simple docstring'''
return BertTokenizer.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def _snake_case ( self : str , **__UpperCamelCase : str ) ->Optional[int]:
'''simple docstring'''
return BertTokenizerFast.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def _snake_case ( self : str , **__UpperCamelCase : Any ) ->Tuple:
'''simple docstring'''
return ChineseCLIPImageProcessor.from_pretrained(self.tmpdirname , **__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
shutil.rmtree(self.tmpdirname )
def _snake_case ( self : str ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = [np.random.randint(2_55 , size=(3, 30, 4_00) , dtype=np.uinta )]
        _UpperCAmelCase = [Image.fromarray(np.moveaxis(x , 0 , -1 ) ) for x in image_inputs]
return image_inputs
def _snake_case ( self : Tuple ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = self.get_image_processor()
        _UpperCAmelCase = ChineseCLIPProcessor(tokenizer=tokenizer_slow , image_processor=image_processor )
processor_slow.save_pretrained(self.tmpdirname )
        _UpperCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname , use_fast=False )
        _UpperCAmelCase = ChineseCLIPProcessor(tokenizer=tokenizer_fast , image_processor=image_processor )
processor_fast.save_pretrained(self.tmpdirname )
_UpperCAmelCase = ChineseCLIPProcessor.from_pretrained(self.tmpdirname )
self.assertEqual(processor_slow.tokenizer.get_vocab() , tokenizer_slow.get_vocab() )
self.assertEqual(processor_fast.tokenizer.get_vocab() , tokenizer_fast.get_vocab() )
self.assertEqual(tokenizer_slow.get_vocab() , tokenizer_fast.get_vocab() )
        self.assertIsInstance(processor_slow.tokenizer , BertTokenizer )
        self.assertIsInstance(processor_fast.tokenizer , BertTokenizerFast )
        self.assertEqual(processor_slow.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertEqual(processor_fast.image_processor.to_json_string() , image_processor.to_json_string() )
        self.assertIsInstance(processor_slow.image_processor , ChineseCLIPImageProcessor )
        self.assertIsInstance(processor_fast.image_processor , ChineseCLIPImageProcessor )
def _snake_case ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = ChineseCLIPProcessor(tokenizer=self.get_tokenizer() , image_processor=self.get_image_processor() )
processor.save_pretrained(self.tmpdirname )
_UpperCAmelCase = self.get_tokenizer(cls_token="""(CLS)""" , sep_token="""(SEP)""" )
        _UpperCAmelCase = self.get_image_processor(do_normalize=False )
        _UpperCAmelCase = ChineseCLIPProcessor.from_pretrained(
            self.tmpdirname , cls_token="""(CLS)""" , sep_token="""(SEP)""" , do_normalize=False )
self.assertEqual(processor.tokenizer.get_vocab() , tokenizer_add_kwargs.get_vocab() )
        self.assertIsInstance(processor.tokenizer , BertTokenizerFast )
        self.assertEqual(processor.image_processor.to_json_string() , image_processor_add_kwargs.to_json_string() )
        self.assertIsInstance(processor.image_processor , ChineseCLIPImageProcessor )
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = image_processor(__UpperCamelCase , return_tensors="""np""" )
_UpperCAmelCase = processor(images=__UpperCamelCase , return_tensors="""np""" )
for key in input_feat_extract.keys():
self.assertAlmostEqual(input_feat_extract[key].sum() , input_processor[key].sum() , delta=1e-2 )
def _snake_case ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = """Alexandra,T-shirt的价格是15便士。"""
_UpperCAmelCase = processor(text=__UpperCamelCase )
_UpperCAmelCase = tokenizer(__UpperCamelCase )
for key in encoded_tok.keys():
self.assertListEqual(encoded_tok[key] , encoded_processor[key] )
def _snake_case ( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = """Alexandra,T-shirt的价格是15便士。"""
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , ["""input_ids""", """token_type_ids""", """attention_mask""", """pixel_values"""] )
# test if it raises when no input is passed
        with pytest.raises(ValueError ):
processor()
def _snake_case ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = [[1, 4, 5, 8, 1, 0, 8], [3, 4, 3, 1, 1, 8, 9]]
_UpperCAmelCase = processor.batch_decode(__UpperCamelCase )
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = self.get_image_processor()
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = ChineseCLIPProcessor(tokenizer=__UpperCamelCase , image_processor=__UpperCamelCase )
_UpperCAmelCase = """Alexandra,T-shirt的价格是15便士。"""
_UpperCAmelCase = self.prepare_image_inputs()
_UpperCAmelCase = processor(text=__UpperCamelCase , images=__UpperCamelCase )
self.assertListEqual(list(inputs.keys() ) , processor.model_input_names ) | 708 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Any=4 , __UpperCamelCase : Optional[int]=[0, 1, 2, 3] , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : int=[1, 3_84, 24, 24] , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=None , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = backbone_out_indices
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = scope
_UpperCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : int = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : str = False
a : List[str] = False
a : Dict = False
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = DPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = False
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
_UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = """add"""
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
_UpperCAmelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCamelCase , atol=1e-4 ) ) | 19 | 0 |
"""simple docstring"""
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """"""
for i in table:
res += inp[i - 1]
return res
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
return data[1:] + data[0]
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """"""
for i in range(len(_A ) ):
if a[i] == b[i]:
res += "0"
else:
res += "1"
return res
def _UpperCamelCase ( _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = int("""0b""" + data[0] + data[-1] , 2 )
_UpperCAmelCase = int("""0b""" + data[1:3] , 2 )
return bin(s[row][col] )[2:]
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = message[:4]
_UpperCAmelCase = message[4:]
_UpperCAmelCase = apply_table(_A , _A )
_UpperCAmelCase = xor(_A , _A )
_UpperCAmelCase = apply_sbox(_A , temp[:4] ) # noqa: E741
_UpperCAmelCase = apply_sbox(_A , temp[4:] )
_UpperCAmelCase = """0""" * (2 - len(_A )) + l # noqa: E741
_UpperCAmelCase = """0""" * (2 - len(_A )) + r
_UpperCAmelCase = apply_table(l + r , _A )
_UpperCAmelCase = xor(_A , _A )
return temp + right
if __name__ == "__main__":
a : Optional[Any] = input('''Enter 10 bit key: ''')
a : Dict = input('''Enter 8 bit message: ''')
a : List[Any] = [6, 3, 7, 4, 8, 5, 1_0, 9]
a : Optional[int] = [3, 5, 2, 7, 4, 1_0, 1, 9, 8, 6]
a : Dict = [2, 4, 3, 1]
a : Union[str, Any] = [2, 6, 3, 1, 4, 8, 5, 7]
a : str = [4, 1, 3, 5, 7, 2, 8, 6]
a : List[str] = [4, 1, 2, 3, 2, 3, 4, 1]
a : Tuple = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
a : Optional[Any] = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
a : Optional[Any] = apply_table(key, paa_table)
a : Union[str, Any] = temp[:5]
a : Tuple = temp[5:]
a : Dict = left_shift(left)
a : Any = left_shift(right)
a : Dict = apply_table(left + right, pa_table)
a : str = left_shift(left)
a : Union[str, Any] = left_shift(right)
a : Dict = left_shift(left)
a : Optional[int] = left_shift(right)
a : Any = apply_table(left + right, pa_table)
# encryption
a : List[Any] = apply_table(message, IP)
a : Tuple = function(expansion, sa, sa, keya, temp)
a : Optional[Any] = temp[4:] + temp[:4]
a : Dict = function(expansion, sa, sa, keya, temp)
a : int = apply_table(temp, IP_inv)
print('''Cipher text is:''', CT)
# decryption
a : Tuple = apply_table(CT, IP)
a : Optional[Any] = function(expansion, sa, sa, keya, temp)
a : List[str] = temp[4:] + temp[:4]
a : str = function(expansion, sa, sa, keya, temp)
a : List[Any] = apply_table(temp, IP_inv)
print('''Plain text after decypting is:''', PT) | 709 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : List[str] = logging.get_logger(__name__)
class a_ ( enum.Enum ):
a : Optional[Any] = 0
a : Dict = 1
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'generated'
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) ->Any:
'''simple docstring'''
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Any , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
if truncation is not None:
_UpperCAmelCase = truncation
_UpperCAmelCase = generate_kwargs
_UpperCAmelCase = {}
if return_tensors is not None and return_type is None:
_UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCAmelCase = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
_UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
return True
def _snake_case ( self : Optional[Any] , *__UpperCamelCase : Any , __UpperCamelCase : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
_UpperCAmelCase = ([prefix + arg for arg in args[0]],)
_UpperCAmelCase = True
elif isinstance(args[0] , __UpperCamelCase ):
_UpperCAmelCase = (prefix + args[0],)
_UpperCAmelCase = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_UpperCAmelCase = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : str=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCamelCase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def _snake_case ( self : str , __UpperCamelCase : Dict , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
if self.framework == "pt":
_UpperCAmelCase ,_UpperCAmelCase = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
_UpperCAmelCase ,_UpperCAmelCase = tf.shape(model_inputs["""input_ids"""] ).numpy()
_UpperCAmelCase = generate_kwargs.get("""min_length""" , self.model.config.min_length )
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
_UpperCAmelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCAmelCase = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_UpperCAmelCase = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=ReturnType.TEXT , __UpperCamelCase : int=False ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCAmelCase = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_UpperCAmelCase = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'summary'
def __call__( self : Optional[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : Optional[int] = 'translation'
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def _snake_case ( self : Tuple , *__UpperCamelCase : List[str] , __UpperCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=None ) ->Tuple:
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : int=None , __UpperCamelCase : int=None , **__UpperCamelCase : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
_UpperCAmelCase = src_lang
if tgt_lang is not None:
_UpperCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCAmelCase = kwargs.get("""task""" , self.task )
_UpperCAmelCase = task.split("""_""" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
_UpperCAmelCase = items[1]
_UpperCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->int:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase ) | 19 | 0 |
"""simple docstring"""
import random
import unittest
import numpy as np
import transformers
from transformers import is_flax_available, is_torch_available
from transformers.testing_utils import is_pt_flax_cross_test, require_flax
if is_flax_available():
import os
import jax.numpy as jnp
from jax import jit
from transformers import AutoTokenizer, FlaxAutoModelForCausalLM
from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
a : Optional[int] = '''0.12''' # assumed parallelism: 8
if is_torch_available():
import torch
def _UpperCamelCase ( _A , _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
if rng is None:
_UpperCAmelCase = random.Random()
_UpperCAmelCase = 1
for dim in shape:
total_dims *= dim
_UpperCAmelCase = []
for _ in range(_A ):
values.append(rng.randint(0 , vocab_size - 1 ) )
_UpperCAmelCase = np.array(_A , dtype=jnp.intaa ).reshape(_A )
return output
def _UpperCamelCase ( _A , _A=None ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = ids_tensor(_A , vocab_size=2 , rng=_A )
# make sure that at least one token is attended to for each batch
_UpperCAmelCase = 1
return attn_mask
@require_flax
class a_ :
a : Union[str, Any] = None
a : List[str] = ()
def _snake_case ( self : int ) ->Optional[Any]:
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
# cut to half length & take max batch_size 3
_UpperCAmelCase = 2
_UpperCAmelCase = inputs["""input_ids"""].shape[-1] // 2
_UpperCAmelCase = inputs["""input_ids"""][:max_batch_size, :sequence_length]
_UpperCAmelCase = jnp.ones_like(__UpperCamelCase )
_UpperCAmelCase = attention_mask[:max_batch_size, :sequence_length]
# generate max 5 tokens
_UpperCAmelCase = input_ids.shape[-1] + 5
if config.eos_token_id is not None and config.pad_token_id is None:
# hack to allow generate for models such as GPT2 as is done in `generate()`
_UpperCAmelCase = config.eos_token_id
return config, input_ids, attention_mask, max_length
@is_pt_flax_cross_test
def _snake_case ( self : Any ) ->List[str]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
_UpperCAmelCase = 0
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model_class.__name__[4:] # Skip the "Flax" at the beginning
_UpperCAmelCase = getattr(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = pt_model_class(__UpperCamelCase ).eval()
_UpperCAmelCase = load_flax_weights_in_pytorch_model(__UpperCamelCase , flax_model.params )
_UpperCAmelCase = flax_model.generate(__UpperCamelCase ).sequences
_UpperCAmelCase = pt_model.generate(torch.tensor(__UpperCamelCase , dtype=torch.long ) )
if flax_generation_outputs.shape[-1] > pt_generation_outputs.shape[-1]:
_UpperCAmelCase = flax_generation_outputs[:, : pt_generation_outputs.shape[-1]]
self.assertListEqual(pt_generation_outputs.numpy().tolist() , flax_generation_outputs.tolist() )
def _snake_case ( self : List[Any] ) ->List[Any]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _snake_case ( self : List[str] ) ->List[str]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = True
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _snake_case ( self : Any ) ->List[str]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
_UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _snake_case ( self : Tuple ) ->Dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = False
_UpperCAmelCase = max_length
_UpperCAmelCase = 2
_UpperCAmelCase = 2
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[0] , input_ids.shape[0] * config.num_return_sequences )
def _snake_case ( self : Dict ) ->List[str]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = True
_UpperCAmelCase = max_length
_UpperCAmelCase = 0.8
_UpperCAmelCase = 10
_UpperCAmelCase = 0.3
_UpperCAmelCase = 1
_UpperCAmelCase = 8
_UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _snake_case ( self : Dict ) ->List[Any]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = max_length
_UpperCAmelCase = 1
_UpperCAmelCase = 8
_UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
_UpperCAmelCase = max_length
_UpperCAmelCase = 2
_UpperCAmelCase = 1
_UpperCAmelCase = 8
_UpperCAmelCase = 9
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _snake_case ( self : str ) ->Optional[int]:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
_UpperCAmelCase = False
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _snake_case ( self : List[str] ) ->Tuple:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
_UpperCAmelCase = True
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
def _snake_case ( self : List[Any] ) ->Dict:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = self._get_input_ids_and_config()
# pad attention mask on the left
_UpperCAmelCase = attention_mask.at[(0, 0)].set(0 )
_UpperCAmelCase = 2
_UpperCAmelCase = max_length
for model_class in self.all_generative_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertEqual(generation_outputs.shape[-1] , __UpperCamelCase )
_UpperCAmelCase = jit(model.generate )
_UpperCAmelCase = jit_generate(__UpperCamelCase , attention_mask=__UpperCamelCase ).sequences
self.assertListEqual(generation_outputs.tolist() , jit_generation_outputs.tolist() )
@require_flax
class a_ ( unittest.TestCase ):
def _snake_case ( self : int ) ->int:
_UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/tiny-bert""" )
_UpperCAmelCase = FlaxAutoModelForCausalLM.from_pretrained("""hf-internal-testing/tiny-bert-flax-only""" )
_UpperCAmelCase = """Hello world"""
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" ).input_ids
# typos are quickly detected (the correct argument is `do_sample`)
with self.assertRaisesRegex(__UpperCamelCase , """do_samples""" ):
model.generate(__UpperCamelCase , do_samples=__UpperCamelCase )
# arbitrary arguments that will not be used anywhere are also not accepted
with self.assertRaisesRegex(__UpperCamelCase , """foo""" ):
_UpperCAmelCase = {"""foo""": """bar"""}
model.generate(__UpperCamelCase , **__UpperCamelCase ) | 710 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
_UpperCAmelCase = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__UpperCamelCase )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 19 | 0 |
"""simple docstring"""
from math import pi, sqrt
def _UpperCamelCase ( _A ) -> float:
if num <= 0:
raise ValueError("""math domain error""" )
if num > 171.5:
raise OverflowError("""math range error""" )
elif num - int(_A ) not in (0, 0.5):
raise NotImplementedError("""num must be an integer or a half-integer""" )
elif num == 0.5:
return sqrt(_A )
else:
return 1.0 if num == 1 else (num - 1) * gamma(num - 1 )
def _UpperCamelCase ( ) -> None:
assert gamma(0.5 ) == sqrt(_A )
assert gamma(1 ) == 1.0
assert gamma(2 ) == 1.0
if __name__ == "__main__":
from doctest import testmod
testmod()
a : int = 1.0
while num:
a : Tuple = float(input('''Gamma of: '''))
print(F"gamma({num}) = {gamma(num)}")
print('''\nEnter 0 to exit...''') | 711 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a : str = True
except (ImportError, ModuleNotFoundError):
a : List[str] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
re.sub("""<n>""" , """""" , _A ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_A ) ) | 19 | 0 |
"""simple docstring"""
import sys
from pathlib import Path
a : Optional[int] = Path(__file__).resolve().parents[3] / '''src'''
sys.path.insert(1, str(git_repo_path))
import dataclasses # noqa
import io # noqa
import itertools # noqa
import json # noqa
import os # noqa
import unittest # noqa
from copy import deepcopy # noqa
from parameterized import parameterized # noqa
from transformers import TrainingArguments, is_torch_available # noqa
from transformers.deepspeed import is_deepspeed_available # noqa
from transformers.file_utils import WEIGHTS_NAME # noqa
from transformers.testing_utils import ( # noqa
CaptureLogger,
ExtendSysPath,
TestCasePlus,
execute_subprocess_async,
get_gpu_count,
mockenv_context,
require_deepspeed,
require_torch_gpu,
require_torch_multi_gpu,
slow,
)
from transformers.trainer_utils import set_seed # noqa
set_seed(4_2)
a : str = {'''base''': '''patrickvonplaten/wav2vec2_tiny_random''', '''robust''': '''patrickvonplaten/wav2vec2_tiny_random_robust'''}
a : List[Any] = '''zero2'''
a : Any = '''zero3'''
a : List[str] = [ZEROa, ZEROa]
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = parameterized.to_safe_name("""_""".join(str(_A ) for x in param.args ) )
return F"""{func.__name__}_{param_based_name}"""
# Cartesian-product of zero stages with models to test
a : List[str] = list(itertools.product(stages, models.keys()))
@slow
@require_deepspeed
@require_torch_gpu
class a_ ( _UpperCAmelCase ):
'''simple docstring'''
@parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str ) ->Optional[int]:
'''simple docstring'''
self.run_and_check(
stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
self.run_and_check(
stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )
@parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : int ) ->Dict:
'''simple docstring'''
self.run_and_check(
stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )
@require_torch_multi_gpu
@parameterized.expand(__UpperCamelCase , name_func=__UpperCamelCase )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
self.run_and_check(
stage=__UpperCamelCase , model=__UpperCamelCase , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )
def _snake_case ( self : Tuple , __UpperCamelCase : Tuple ) ->Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int = 10 , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = models[model]
_UpperCAmelCase = self.run_trainer(
stage=__UpperCamelCase , model_name=__UpperCamelCase , eval_steps=__UpperCamelCase , num_train_epochs=1 , distributed=__UpperCamelCase , fpaa=__UpperCamelCase , )
self.do_checks(__UpperCamelCase )
return output_dir
def _snake_case ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : int = 10 , __UpperCamelCase : int = 1 , __UpperCamelCase : bool = True , __UpperCamelCase : bool = True , ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.get_auto_remove_tmp_dir("""./xxx""" , after=__UpperCamelCase )
_UpperCAmelCase = f"""
--model_name_or_path {model_name}
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--validation_split_name validation
--output_dir {output_dir}
--num_train_epochs {str(__UpperCamelCase )}
--per_device_train_batch_size 2
--per_device_eval_batch_size 2
--evaluation_strategy steps
--learning_rate 5e-4
--warmup_steps 8
--orthography timit
--preprocessing_num_workers 1
--group_by_length
--freeze_feature_extractor
--report_to none
--save_steps 0
--eval_steps {eval_steps}
--report_to none
""".split()
if fpaa:
args.extend(["""--fp16"""] )
# currently ds_config_wav2vec2_zero.json requires "zero_optimization.find_unused_parameters": true,
# hence the separate config files
_UpperCAmelCase = f"""--deepspeed {self.test_file_dir_str}/ds_config_wav2vec2_{stage}.json""".split()
_UpperCAmelCase = [f"""{self.examples_dir_str}/research_projects/wav2vec2/run_asr.py"""]
_UpperCAmelCase = self.get_launcher(__UpperCamelCase )
_UpperCAmelCase = launcher + script + args + ds_args
# keep for quick debug
# print(" ".join([f"\nPYTHONPATH={self.src_dir_str}"] +cmd)); die
execute_subprocess_async(__UpperCamelCase , env=self.get_env() )
return output_dir
def _snake_case ( self : Optional[int] , __UpperCamelCase : int=False ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = min(2 , get_gpu_count() ) if distributed else 1
return f"""deepspeed --num_nodes 1 --num_gpus {num_gpus}""".split() | 712 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a_ :
a : List[Any] = PegasusConfig
a : Dict = {}
a : List[Any] = 'gelu'
def __init__( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any=False , __UpperCamelCase : Any=99 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[Any]=20 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Tuple=0 , ) ->int:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _snake_case ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _UpperCamelCase ( _A , _A , _A , _A=None , _A=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(_A , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a : Any = True
a : int = False
a : Union[str, Any] = False
a : Optional[int] = False
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : List[Any] , __UpperCamelCase : str=None , **__UpperCamelCase : int ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=5_12 , padding=__UpperCamelCase )
_UpperCAmelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded | 19 | 0 |
"""simple docstring"""
from typing import Dict, List, Optional, Type
from .. import config
from ..utils import logging
from .formatting import (
ArrowFormatter,
CustomFormatter,
Formatter,
PandasFormatter,
PythonFormatter,
TensorFormatter,
format_table,
query_table,
)
from .np_formatter import NumpyFormatter
a : Union[str, Any] = logging.get_logger(__name__)
a : Dict[Optional[str], Type[Formatter]] = {}
a : Dict[Optional[str], str] = {}
a : Dict[Optional[str], Exception] = {}
def _UpperCamelCase ( _A , _A , _A = None , ) -> str:
"""simple docstring"""
_UpperCAmelCase = aliases if aliases is not None else []
if format_type in _FORMAT_TYPES:
logger.warning(
F"""Overwriting format type '{format_type}' ({_FORMAT_TYPES[format_type].__name__} -> {formatter_cls.__name__})""" )
_UpperCAmelCase = formatter_cls
for alias in set(aliases + [format_type] ):
if alias in _FORMAT_TYPES_ALIASES:
logger.warning(
F"""Overwriting format type alias '{alias}' ({_FORMAT_TYPES_ALIASES[alias]} -> {format_type})""" )
_UpperCAmelCase = format_type
def _UpperCamelCase ( _A , _A , _A = None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = aliases if aliases is not None else []
for alias in set(aliases + [format_type] ):
_UpperCAmelCase = unavailable_error
# Here we define all the available formatting functions that can be used by `Dataset.set_format`
_register_formatter(PythonFormatter, None, aliases=['''python'''])
_register_formatter(ArrowFormatter, '''arrow''', aliases=['''pa''', '''pyarrow'''])
_register_formatter(NumpyFormatter, '''numpy''', aliases=['''np'''])
_register_formatter(PandasFormatter, '''pandas''', aliases=['''pd'''])
_register_formatter(CustomFormatter, '''custom''')
if config.TORCH_AVAILABLE:
from .torch_formatter import TorchFormatter
_register_formatter(TorchFormatter, '''torch''', aliases=['''pt''', '''pytorch'''])
else:
a : Optional[Any] = ValueError('''PyTorch needs to be installed to be able to return PyTorch tensors.''')
_register_unavailable_formatter(_torch_error, '''torch''', aliases=['''pt''', '''pytorch'''])
if config.TF_AVAILABLE:
from .tf_formatter import TFFormatter
_register_formatter(TFFormatter, '''tensorflow''', aliases=['''tf'''])
else:
a : Dict = ValueError('''Tensorflow needs to be installed to be able to return Tensorflow tensors.''')
_register_unavailable_formatter(_tf_error, '''tensorflow''', aliases=['''tf'''])
if config.JAX_AVAILABLE:
from .jax_formatter import JaxFormatter
_register_formatter(JaxFormatter, '''jax''', aliases=[])
else:
a : List[Any] = ValueError('''JAX needs to be installed to be able to return JAX arrays.''')
_register_unavailable_formatter(_jax_error, '''jax''', aliases=[])
def _UpperCamelCase ( _A ) -> Optional[str]:
"""simple docstring"""
if format_type in _FORMAT_TYPES_ALIASES:
return _FORMAT_TYPES_ALIASES[format_type]
else:
return format_type
def _UpperCamelCase ( _A , **_A ) -> Formatter:
"""simple docstring"""
_UpperCAmelCase = get_format_type_from_alias(_A )
if format_type in _FORMAT_TYPES:
return _FORMAT_TYPES[format_type](**_A )
if format_type in _FORMAT_TYPES_ALIASES_UNAVAILABLE:
raise _FORMAT_TYPES_ALIASES_UNAVAILABLE[format_type]
else:
raise ValueError(
F"""Return type should be None or selected in {list(type for type in _FORMAT_TYPES.keys() if type != None )}, but got '{format_type}'""" ) | 713 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : List[str]=7 , __UpperCamelCase : List[Any]=9 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : int=False , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : List[Any]=8 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=0.0_0_2 , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = encoder_seq_length
_UpperCAmelCase = decoder_seq_length
# For common tests
_UpperCAmelCase = self.decoder_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = d_ff
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = None
_UpperCAmelCase = decoder_layers
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : str=None , ) ->int:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
_UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_UpperCAmelCase ,_UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = model(__UpperCamelCase )["""last_hidden_state"""]
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_UpperCAmelCase = model(**__UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a : Any = True
a : Optional[int] = False
a : Any = False
a : Optional[int] = True
a : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a : int = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(__UpperCamelCase , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_UpperCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_UpperCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids.to(__UpperCamelCase ) )
_UpperCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase ) | 19 | 0 |
"""simple docstring"""
import argparse
import random
import joblib
import numpy as np
import torch
from igf.igf import (
SecondaryLearner,
collect_objective_set,
compute_perplexity,
generate_datasets,
load_gpta,
recopy_gpta,
set_seed,
train_secondary_learner,
)
from torch.utils.data import DataLoader, RandomSampler
from transformers import GPTaLMHeadModel
def _UpperCamelCase ( _A=3_2 , _A=1_0 , _A=1_0_0 , _A=1_0_2_6 , _A=True , _A="data/tokenized_stories_train_wikitext103.jbl" , _A="igf_context_pairs.jbl" , ) -> List[str]:
"""simple docstring"""
set_seed(3 )
# generate train_data and objective_set
_UpperCAmelCase ,_UpperCAmelCase = generate_datasets(
_A , _A , number=_A , min_len=1_0_2_6 , trim=_A )
# keeps model same across runs
set_seed(4 )
# model, lm_optimizer, lm_scheduler = recopy_gpt2(model, device, max_steps) # store original model weights
# can we train on GPU?
_UpperCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
# load pretrained model
_UpperCAmelCase = load_gpta("""gpt2""" ).to(_A )
print("""computing perplexity on objective set""" )
_UpperCAmelCase = compute_perplexity(_A , _A , _A ).item()
print("""perplexity on objective set:""" , _A )
# collect igf pairs and save to file demo.jbl
collect_objective_set(_A , _A , _A , _A , _A , _A , _A , _A )
# clean up, delete model and data we don't need anymore
del model, train_data, objective_set
torch.cuda.empty_cache()
def _UpperCamelCase ( _A , _A=1_5 , _A=1_2_8 , _A=1_0_0 , _A="igf_model.pt" , ) -> Any:
"""simple docstring"""
set_seed(4_2 )
# Load pre-trained model
_UpperCAmelCase = GPTaLMHeadModel.from_pretrained("""gpt2""" )
# Initialize secondary learner to use embedding weights of model
_UpperCAmelCase = SecondaryLearner(_A )
# Train secondary learner
_UpperCAmelCase = train_secondary_learner(
_A , _A , max_epochs=_A , batch_size=_A , eval_freq=1_0_0 , igf_model_path=_A , )
del model, secondary_learner_train_data
torch.cuda.empty_cache()
return secondary_learner
def _UpperCamelCase ( _A , _A , _A , _A=3_2 , _A=1_0_0_0 , _A=1_6 , _A=1.0 , _A=recopy_gpta , _A=None , _A=1_0 , _A="gpt2_finetuned.pt" , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = torch.device("""cuda:0""" if torch.cuda.is_available() else """cpu""" )
_UpperCAmelCase = RandomSampler(_A )
_UpperCAmelCase = DataLoader(_A , sampler=_A )
_UpperCAmelCase = max_steps // (len(_A )) + 1
_UpperCAmelCase = 0
_UpperCAmelCase = torch.zeros((1, context_len) , dtype=torch.long , device=_A )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = recopy_model(_A , _A , _A )
model.train()
if secondary_learner is not None:
secondary_learner.to(_A )
secondary_learner.eval()
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = []
_UpperCAmelCase = []
# Compute the performance of the transformer model at the beginning
_UpperCAmelCase = compute_perplexity(_A , _A , _A )
test_perps.append(_A )
print("""Test perplexity, step""" , _A , """:""" , _A )
for epoch in range(int(_A ) ):
for step, example in enumerate(_A ):
torch.cuda.empty_cache()
_UpperCAmelCase = random.randint(0 , example.size(2 ) - context_len - 1 )
_UpperCAmelCase = example[0, 0, start : start + context_len]
lm_optimizer.zero_grad()
_UpperCAmelCase = model(_A , labels=_A )
_UpperCAmelCase = True
if secondary_learner is not None:
_UpperCAmelCase = secondary_learner.forward(
torch.tensor(_A , dtype=torch.long , device=_A ).unsqueeze(0 ) )[0].item()
observed_qs.append(float(_A ) )
# Here we implement the simple non-constant threshold for the predicted IG(X) value
# We will decay the selectivity of our secondary learner filter from
# 1 standard deviation above average to 1 below average after 10 batches.
if global_step == 1_0:
_UpperCAmelCase = -1
if predicted_q < threshold:
_UpperCAmelCase = False
# If we passed the filter, add the context to the batch!
if do_backprop:
contexts.append(np.array(context.cpu() ) )
_UpperCAmelCase = outputs[0]
lm_loss.backward()
examples += 1
del outputs
# Once the batch is filled with enough contexts, backprop on the batch.
if examples == batch_size:
torch.cuda.empty_cache()
_UpperCAmelCase = 0
# Do LM backprop
torch.nn.utils.clip_grad_norm_(model.parameters() , 3.0 )
lm_optimizer.step()
lm_scheduler.step() # Update learning rate schedule
global_step += 1
# Compute the performance of the transformer model at this batch
if global_step % eval_interval == 0:
_UpperCAmelCase = compute_perplexity(_A , _A , _A )
test_perps.append(_A )
print("""Test perplexity, step""" , _A , """:""" , _A )
# Break out of the loop after 60 batches
if max_steps > 0 and global_step > 6_0:
break
if max_steps > 0 and global_step > 6_0:
break
# save finetuned transformer model
torch.save(model.state_dict() , _A )
torch.cuda.empty_cache()
# Do some cleaning up so we can reinitialize for the next run of this function
del lm_optimizer
del lm_scheduler
return model
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Fine-tune a transformer model with IGF on a language modeling task""" )
# Required parameters
parser.add_argument(
"""--data_dir""" , default=_A , type=_A , required=_A , help="""The input data dir. Should contain data files for WikiText.""" , )
parser.add_argument(
"""--model_name_or_path""" , default=_A , type=_A , required=_A , help="""Path to pretrained model or model identifier from huggingface.co/models""" , )
parser.add_argument(
"""--data_file""" , type=_A , default=_A , help=(
"""A jbl file containing tokenized data which can be split as objective dataset, """
"""train_dataset and test_dataset."""
) , )
parser.add_argument(
"""--igf_data_file""" , type=_A , default=_A , help="""A jbl file containing the context and information gain pairs to train secondary learner.""" , )
parser.add_argument(
"""--output_dir""" , default=_A , type=_A , required=_A , help="""The output directory where the final fine-tuned model is stored.""" , )
parser.add_argument(
"""--tokenizer_name""" , default=_A , type=_A , help="""Pretrained tokenizer name or path if not the same as model_name""" , )
parser.add_argument("""--seed""" , type=_A , default=_A , help="""A seed for reproducible training.""" )
parser.add_argument(
"""--context_len""" , default=3_2 , type=_A , help=(
"""The maximum total input sequence length after tokenization. Sequences longer """
"""than this will be truncated, sequences shorter will be padded."""
) , )
parser.add_argument(
"""--size_objective_set""" , default=1_0_0 , type=_A , help="""number of articles that are long enough to be used as our objective set""" , )
parser.add_argument(
"""--eval_freq""" , default=1_0_0 , type=_A , help="""secondary model evaluation is triggered at eval_freq""" )
parser.add_argument("""--max_steps""" , default=1_0_0_0 , type=_A , help="""To calculate training epochs""" )
parser.add_argument(
"""--secondary_learner_batch_size""" , default=1_2_8 , type=_A , help="""batch size of training data for secondary learner""" , )
parser.add_argument(
"""--batch_size""" , default=1_6 , type=_A , help="""batch size of training data of language model(gpt2) """ )
parser.add_argument(
"""--eval_interval""" , default=1_0 , type=_A , help=(
"""decay the selectivity of our secondary learner filter from"""
"""1 standard deviation above average to 1 below average after 10 batches"""
) , )
parser.add_argument(
"""--number""" , default=1_0_0 , type=_A , help="""The number of examples split to be used as objective_set/test_data""" )
parser.add_argument(
"""--min_len""" , default=1_0_2_6 , type=_A , help="""The minimum length of the article to be used as objective set""" )
parser.add_argument(
"""--secondary_learner_max_epochs""" , default=1_5 , type=_A , help="""number of epochs to train secondary learner""" )
parser.add_argument("""--trim""" , default=_A , type=_A , help="""truncate the example if it exceeds context length""" )
parser.add_argument(
"""--threshold""" , default=1.0 , type=_A , help=(
"""The threshold value used by secondary learner to filter the train_data and allow only"""
""" informative data as input to the model"""
) , )
parser.add_argument("""--finetuned_model_name""" , default="""gpt2_finetuned.pt""" , type=_A , help="""finetuned_model_name""" )
parser.add_argument(
"""--recopy_model""" , default=_A , type=_A , help="""Reset the model to the original pretrained GPT-2 weights after each iteration""" , )
# function calls
# Collecting *n* pairs of context and information gain(X, IG(X)) for training the secondary learner
generate_n_pairs(
context_len=3_2 , max_steps=1_0 , size_objective_set=1_0_0 , min_len=1_0_2_6 , trim=_A , data_file="""data/tokenized_stories_train_wikitext103.jbl""" , igf_data_file="""igf_context_pairs.jbl""" , )
# Load train data for secondary learner
_UpperCAmelCase = joblib.load("""data/IGF_values.jbl""" )
# Train secondary learner
_UpperCAmelCase = training_secondary_learner(
_A , secondary_learner_max_epochs=1_5 , secondary_learner_batch_size=1_2_8 , eval_freq=1_0_0 , igf_model_path="""igf_model.pt""" , )
# load pretrained gpt2 model
_UpperCAmelCase = GPTaLMHeadModel.from_pretrained("""gpt2""" )
set_seed(4_2 )
# Generate train and test data to train and evaluate gpt2 model
_UpperCAmelCase ,_UpperCAmelCase = generate_datasets(
context_len=3_2 , file="""data/tokenized_stories_train_wikitext103.jbl""" , number=1_0_0 , min_len=1_0_2_6 , trim=_A )
# fine-tuning of the gpt2 model using igf (Information Gain Filtration)
finetune(
_A , _A , _A , context_len=3_2 , max_steps=1_0_0_0 , batch_size=1_6 , threshold=1.0 , recopy_model=_A , secondary_learner=_A , eval_interval=1_0 , finetuned_model_name="""gpt2_finetuned.pt""" , )
if __name__ == "__main__":
main() | 714 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCamelCase ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
_UpperCAmelCase ,_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __UpperCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(_A ) if isinstance(_A , pa.Buffer ) else pa.memory_map(_A )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=_A , features=_A ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_A )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_UpperCAmelCase = os.path.join(_A , """test.arrow""" )
with ArrowWriter(path=_A , schema=pa.schema(_A ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(_A , 1 )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if pa.types.is_list(_A ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
if isinstance(lst[0] , _A ):
change_first_primitive_element_in_list(lst[0] , _A )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_A , format="""png""" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_A , features=Features({"""image""": Image()} ) , embed_local_files=_A ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _A )
with open(_A , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_A )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=_A ) as writer:
writer._build_writer(inferred_schema=_A )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] ) | 19 | 0 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionImageVariationPipeline
from diffusers.utils.testing_utils import load_image, require_torch_gpu, slow, torch_device
a : Any = False
class a_ ( unittest.TestCase ):
pass
@slow
@require_torch_gpu
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = VersatileDiffusionImageVariationPipeline.from_pretrained("""shi-labs/versatile-diffusion""" )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe(
image=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_UpperCAmelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase = np.array([0.0_4_4_1, 0.0_4_6_9, 0.0_5_0_7, 0.0_5_7_5, 0.0_6_3_2, 0.0_6_5_0, 0.0_8_6_5, 0.0_9_0_9, 0.0_9_4_5] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2 | 715 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : int ) ->Tuple:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(__UpperCamelCase , __UpperCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def _snake_case ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__UpperCamelCase ): device for device in jax.devices()}
def _snake_case ( self : Dict , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , __UpperCamelCase ) and column:
if all(
isinstance(__UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__UpperCamelCase , axis=0 )
return column
def _snake_case ( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , (str, bytes, type(__UpperCamelCase )) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
else:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(__UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__UpperCamelCase , """__array__""" ) and not isinstance(__UpperCamelCase , jax.Array ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : dict ) ->int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
_UpperCAmelCase = self._consolidate(__UpperCamelCase )
return column
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_batch(__UpperCamelCase )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
return batch | 19 | 0 |
"""simple docstring"""
from timeit import timeit
a : Any = {
'''MALAYALAM''': True,
'''String''': False,
'''rotor''': True,
'''level''': True,
'''A''': True,
'''BB''': True,
'''ABC''': False,
'''amanaplanacanalpanama''': True, # "a man a plan a canal panama"
}
# Ensure our test data is valid
assert all((key == key[::-1]) is value for key, value in test_data.items())
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = len(_A ) - 1
while start_i < end_i:
if s[start_i] == s[end_i]:
start_i += 1
end_i -= 1
else:
return False
return True
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
_UpperCAmelCase = len(_A ) // 2
_UpperCAmelCase = len(_A )
# We need to traverse till half of the length of string
# as we can get access of the i'th last element from
# i'th index.
# eg: [0,1,2,3,4,5] => 4th index can be accessed
# with the help of 1st index (i==n-i-1)
# where n is length of string
return all(s[i] == s[n - i - 1] for i in range(_A ) )
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
if len(_A ) <= 2:
return True
if s[0] == s[len(_A ) - 1]:
return is_palindrome_recursive(s[1:-1] )
else:
return False
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
return s == s[::-1]
def _UpperCamelCase ( _A ) -> None:
"""simple docstring"""
_UpperCAmelCase = F"""all({name}(key) is value for key, value in test_data.items())"""
_UpperCAmelCase = F"""from __main__ import test_data, {name}"""
_UpperCAmelCase = 5_0_0_0_0_0
_UpperCAmelCase = timeit(stmt=_A , setup=_A , number=_A )
print(F"""{name:<35} finished {number:,} runs in {result:.5f} seconds""" )
if __name__ == "__main__":
for key, value in test_data.items():
assert is_palindrome(key) is is_palindrome_recursive(key)
assert is_palindrome(key) is is_palindrome_slice(key)
print(F"{key:21} {value}")
print('''a man a plan a canal panama''')
# finished 500,000 runs in 0.46793 seconds
benchmark_function('''is_palindrome_slice''')
# finished 500,000 runs in 0.85234 seconds
benchmark_function('''is_palindrome''')
# finished 500,000 runs in 1.32028 seconds
benchmark_function('''is_palindrome_recursive''')
# finished 500,000 runs in 2.08679 seconds
benchmark_function('''is_palindrome_traversal''') | 716 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Tuple = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 19 | 0 |
"""simple docstring"""
import numpy as np
import torch
from torch.nn import CrossEntropyLoss
from transformers import AutoModelForCausalLM, AutoTokenizer
import datasets
from datasets import logging
a : List[Any] = '''\
'''
a : List[Any] = '''
Perplexity (PPL) is one of the most common metrics for evaluating language models.
It is defined as the exponentiated average negative log-likelihood of a sequence.
For more information, see https://huggingface.co/docs/transformers/perplexity
'''
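# Concretely, for a tokenized sequence X = (x_0, ..., x_t), the score computed here is
#     PPL(X) = exp( -(1 / t) * sum_{i=1..t} log p(x_i | x_{<i}) )
# i.e. the exponentiated average negative log-likelihood described above.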
a : Union[str, Any] = '''
Args:
model_id (str): model used for calculating Perplexity
NOTE: Perplexity can only be calculated for causal language models.
This includes models such as gpt2, causal variations of bert,
causal versions of t5, and more (the full list can be found
in the AutoModelForCausalLM documentation here:
https://huggingface.co/docs/transformers/master/en/model_doc/auto#transformers.AutoModelForCausalLM )
    input_texts (list of str): input texts to score; each separate text snippet
is one list entry.
batch_size (int): the batch size to run texts through the model. Defaults to 16.
add_start_token (bool): whether to add the start token to the texts,
so the perplexity can include the probability of the first word. Defaults to True.
    device (str): device to run on; defaults to \'cuda\' when available.
Returns:
perplexity: dictionary containing the perplexity scores for the texts
in the input list, as well as the mean perplexity. If one of the input texts is
longer than the max input length of the model, then it is truncated to the
max length for the perplexity computation.
Examples:
Example 1:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = ["lorem ipsum", "Happy Birthday!", "Bienvenue"]
>>> results = perplexity.compute(model_id=\'gpt2\',
... add_start_token=False,
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
78.22
>>> print(round(results["perplexities"][0], 2))
11.11
Example 2:
>>> perplexity = datasets.load_metric("perplexity")
>>> input_texts = datasets.load_dataset("wikitext",
... "wikitext-2-raw-v1",
... split="test")["text"][:50] # doctest:+ELLIPSIS
[...]
>>> input_texts = [s for s in input_texts if s!=\'\']
>>> results = perplexity.compute(model_id=\'gpt2\',
... input_texts=input_texts) # doctest:+ELLIPSIS
>>> print(list(results.keys()))
[\'perplexities\', \'mean_perplexity\']
>>> print(round(results["mean_perplexity"], 2))
60.35
>>> print(round(results["perplexities"][0], 2))
81.12
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""input_texts""": datasets.Value("""string""" ),
} ) , reference_urls=["""https://huggingface.co/docs/transformers/perplexity"""] , )
def _snake_case ( self : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : int = 16 , __UpperCamelCase : bool = True , __UpperCamelCase : Any=None ) ->Union[str, Any]:
'''simple docstring'''
if device is not None:
assert device in ["gpu", "cpu", "cuda"], "device should be either gpu or cpu."
if device == "gpu":
_UpperCAmelCase = """cuda"""
else:
_UpperCAmelCase = """cuda""" if torch.cuda.is_available() else """cpu"""
_UpperCAmelCase = AutoModelForCausalLM.from_pretrained(__UpperCamelCase )
_UpperCAmelCase = model.to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained(__UpperCamelCase )
# if batch_size > 1 (which generally leads to padding being required), and
# if there is not an already assigned pad_token, assign an existing
# special token to also be the padding token
if tokenizer.pad_token is None and batch_size > 1:
_UpperCAmelCase = list(tokenizer.special_tokens_map_extended.values() )
# check that the model already has at least one special token defined
assert (
len(__UpperCamelCase ) > 0
), "If batch_size > 1, model must have at least one special token to use for padding. Please use a different model or set batch_size=1."
# assign one of the special tokens to also be the pad token
tokenizer.add_special_tokens({"""pad_token""": existing_special_tokens[0]} )
if add_start_token:
# leave room for <BOS> token to be added:
assert (
tokenizer.bos_token is not None
), "Input model must already have a BOS token if using add_start_token=True. Please use a different model, or set add_start_token=False"
_UpperCAmelCase = model.config.max_length - 1
else:
_UpperCAmelCase = model.config.max_length
_UpperCAmelCase = tokenizer(
__UpperCamelCase , add_special_tokens=__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , max_length=__UpperCamelCase , return_tensors="""pt""" , return_attention_mask=__UpperCamelCase , ).to(__UpperCamelCase )
_UpperCAmelCase = encodings["""input_ids"""]
_UpperCAmelCase = encodings["""attention_mask"""]
# check that each input is long enough:
if add_start_token:
assert torch.all(torch.ge(attn_masks.sum(1 ) , 1 ) ), "Each input text must be at least one token long."
else:
assert torch.all(
torch.ge(attn_masks.sum(1 ) , 2 ) ), "When add_start_token=False, each input text must be at least two tokens long. Run with add_start_token=True if inputting strings of only one token, and remove all empty input strings."
_UpperCAmelCase = []
_UpperCAmelCase = CrossEntropyLoss(reduction="""none""" )
for start_index in logging.tqdm(range(0 , len(__UpperCamelCase ) , __UpperCamelCase ) ):
_UpperCAmelCase = min(start_index + batch_size , len(__UpperCamelCase ) )
_UpperCAmelCase = encoded_texts[start_index:end_index]
_UpperCAmelCase = attn_masks[start_index:end_index]
if add_start_token:
_UpperCAmelCase = torch.tensor([[tokenizer.bos_token_id]] * encoded_batch.size(dim=0 ) ).to(__UpperCamelCase )
_UpperCAmelCase = torch.cat([bos_tokens_tensor, encoded_batch] , dim=1 )
_UpperCAmelCase = torch.cat(
[torch.ones(bos_tokens_tensor.size() , dtype=torch.intaa ).to(__UpperCamelCase ), attn_mask] , dim=1 )
_UpperCAmelCase = encoded_batch
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase ).logits
_UpperCAmelCase = out_logits[..., :-1, :].contiguous()
_UpperCAmelCase = labels[..., 1:].contiguous()
_UpperCAmelCase = attn_mask[..., 1:].contiguous()
_UpperCAmelCase = torch.expa(
(loss_fct(shift_logits.transpose(1 , 2 ) , __UpperCamelCase ) * shift_attention_mask_batch).sum(1 )
/ shift_attention_mask_batch.sum(1 ) )
ppls += perplexity_batch.tolist()
return {"perplexities": ppls, "mean_perplexity": np.mean(__UpperCamelCase )} | 717 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a : int = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = test_results.split(""" """ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCAmelCase = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_A ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = None
_UpperCAmelCase = False
for line in failures_short_lines.split("""\n""" ):
if re.search(R"""_ \[doctest\]""" , _A ):
_UpperCAmelCase = True
_UpperCAmelCase = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_UpperCAmelCase = line
_UpperCAmelCase = False
return failures
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = title
_UpperCAmelCase = doc_test_results["""time_spent"""].split(""",""" )[0]
_UpperCAmelCase = doc_test_results["""success"""]
_UpperCAmelCase = doc_test_results["""failures"""]
_UpperCAmelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCAmelCase = doc_test_results
@property
def _snake_case ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self._time_spent]
_UpperCAmelCase = 0
for time in time_spent:
_UpperCAmelCase = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = [0, 0, time_parts[0]]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"""{int(__UpperCamelCase )}h{int(__UpperCamelCase )}m{int(__UpperCamelCase )}s"""
@property
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = 40
_UpperCAmelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(__UpperCamelCase , __UpperCamelCase )}
_UpperCAmelCase = """"""
for category, failures in category_failures.items():
if len(__UpperCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__UpperCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCamelCase )
@staticmethod
def _snake_case ( ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(__UpperCamelCase )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=__UpperCamelCase , )
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_UpperCAmelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
_UpperCAmelCase = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=__UpperCamelCase , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase = """"""
for key, value in failures.items():
_UpperCAmelCase = value[:2_00] + """ [Truncated]""" if len(__UpperCamelCase ) > 2_50 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
_UpperCAmelCase = job_name
_UpperCAmelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_UpperCAmelCase = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_UpperCAmelCase = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
_UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda __UpperCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_UpperCAmelCase = f"""*Num failures* :{len(job_result["failed"] )} \n"""
_UpperCAmelCase = job_result["""failures"""]
_UpperCAmelCase = self.get_reply_blocks(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text=__UpperCamelCase )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f"""Results for {job}""" , blocks=__UpperCamelCase , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = os.environ["""GITHUB_RUN_ID"""]
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A ).json()
_UpperCAmelCase = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , _A )
return {}
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {}
if os.path.exists(_A ):
_UpperCAmelCase = os.listdir(_A )
for file in files:
try:
with open(os.path.join(_A , _A ) , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(_A , _A )}.""" ) from e
return _artifact
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
class a_ :
def __init__( self : List[Any] , __UpperCamelCase : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = name
_UpperCAmelCase = []
def __str__( self : int ) ->Optional[Any]:
'''simple docstring'''
return self.name
def _snake_case ( self : Dict , __UpperCamelCase : str ) ->int:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
_UpperCAmelCase = {}
_UpperCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_UpperCAmelCase = directory
if artifact_name not in _available_artifacts:
_UpperCAmelCase = Artifact(_A )
_available_artifacts[artifact_name].add_path(_A )
return _available_artifacts
if __name__ == "__main__":
a : Dict = get_job_links()
a : Dict = retrieve_available_artifacts()
a : Optional[int] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a : Dict = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a : int = github_actions_job_links.get('''run_doctests''')
a : Tuple = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
a : Optional[Any] = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
a , a , a : str = handle_test_results(artifact['''stats'''])
a : Tuple = failed
a : int = success
a : Any = time_spent[1:-1] + ''', '''
a : Dict = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
a : List[Any] = line.replace('''FAILED ''', '''''')
a : Tuple = line.split()[0].replace('''\n''', '''''')
if "::" in line:
a , a : Union[str, Any] = line.split('''::''')
else:
a , a : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a : Optional[Any] = all_failures[test] if test in all_failures else '''N/A'''
a : List[str] = failure
break
a : List[Any] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply() | 19 | 0 |
from collections import Counter
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
a : List[str] = datasets.load_iris()
a : int = np.array(data['''data'''])
a : Optional[int] = np.array(data['''target'''])
a : Optional[int] = data['''target_names''']
a : Optional[Any] = train_test_split(X, y)
def _UpperCamelCase ( _A , _A ) -> Tuple:
"""simple docstring"""
return np.linalg.norm(np.array(_A ) - np.array(_A ) )
def _UpperCamelCase ( _A , _A , _A , _A , _A=5 ) -> str:
"""simple docstring"""
_UpperCAmelCase = zip(_A , _A )
# List of distances of all points from the point to be classified
_UpperCAmelCase = []
for data_point in data:
_UpperCAmelCase = euclidean_distance(data_point[0] , _A )
distances.append((distance, data_point[1]) )
# Choosing 'k' points with the least distances.
_UpperCAmelCase = [i[1] for i in sorted(_A )[:k]]
# Most commonly occurring class among them
# is the class into which the point is classified
_UpperCAmelCase = Counter(_A ).most_common(1 )[0][0]
return classes[result]
if __name__ == "__main__":
print(classifier(X_train, y_train, classes, [4.4, 3.1, 1.3, 1.4])) | 718 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase ( _A , _A=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def _UpperCamelCase ( *_A ) -> Dict:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("""test""" ):
for decorator in decorators:
_UpperCAmelCase = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
class a_ ( _UpperCAmelCase ):
pass
class a_ ( _UpperCAmelCase ):
a : Any = 0
a : Optional[Any] = 1
a : int = 2
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(_A )
yield
finally:
os.chdir(_A )
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(_A )
if not quiet:
print(_A , _A , file=_A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , _A , 0 , re.M )
return int(_A )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta | 19 | 0 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : List[str] = logging.get_logger(__name__)
class a_ ( enum.Enum ):
a : Optional[Any] = 0
a : Dict = 1
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'generated'
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) ->Any:
'''simple docstring'''
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Any , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
if truncation is not None:
_UpperCAmelCase = truncation
_UpperCAmelCase = generate_kwargs
_UpperCAmelCase = {}
if return_tensors is not None and return_type is None:
_UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCAmelCase = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
_UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
return True
def _snake_case ( self : Optional[Any] , *__UpperCamelCase : Any , __UpperCamelCase : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
_UpperCAmelCase = ([prefix + arg for arg in args[0]],)
_UpperCAmelCase = True
elif isinstance(args[0] , __UpperCamelCase ):
_UpperCAmelCase = (prefix + args[0],)
_UpperCAmelCase = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_UpperCAmelCase = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : str=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCamelCase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def _snake_case ( self : str , __UpperCamelCase : Dict , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
if self.framework == "pt":
_UpperCAmelCase ,_UpperCAmelCase = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
_UpperCAmelCase ,_UpperCAmelCase = tf.shape(model_inputs["""input_ids"""] ).numpy()
_UpperCAmelCase = generate_kwargs.get("""min_length""" , self.model.config.min_length )
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
_UpperCAmelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCAmelCase = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_UpperCAmelCase = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=ReturnType.TEXT , __UpperCamelCase : int=False ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCAmelCase = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_UpperCAmelCase = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'summary'
def __call__( self : Optional[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : Optional[int] = 'translation'
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def _snake_case ( self : Tuple , *__UpperCamelCase : List[str] , __UpperCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=None ) ->Tuple:
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : int=None , __UpperCamelCase : int=None , **__UpperCamelCase : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
_UpperCAmelCase = src_lang
if tgt_lang is not None:
_UpperCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCAmelCase = kwargs.get("""task""" , self.task )
_UpperCAmelCase = task.split("""_""" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
_UpperCAmelCase = items[1]
_UpperCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->int:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase ) | 719 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( _UpperCAmelCase ):
a : List[Any] = ''
a : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Tuple , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[str] = None , **__UpperCamelCase : Any , ) ->Any:
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def _snake_case ( self : List[str] ) ->List[str]:
'''simple docstring'''
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str = "rb" , **__UpperCamelCase : Any , ) ->List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
_UpperCAmelCase = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _snake_case ( self : int , __UpperCamelCase : int , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=False , **__UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip("""/""" ) )
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip("""/""" ) )
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 19 | 0 |
import argparse
import json
import os
import torch
from torch import nn
from transformers import NllbMoeConfig, NllbMoeModel
from transformers.modeling_utils import dtype_byte_size
from transformers.utils import WEIGHTS_INDEX_NAME, WEIGHTS_NAME
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = [
"""encoder.version""",
"""decoder.version""",
"""model.encoder.version""",
"""model.decoder.version""",
"""decoder.output_projection.weight""",
"""_float_tensor""",
"""encoder.embed_positions._float_tensor""",
"""decoder.embed_positions._float_tensor""",
]
for k in ignore_keys:
state_dict.pop(_A , _A )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase ,_UpperCAmelCase = emb.weight.shape
_UpperCAmelCase = nn.Linear(_A , _A , bias=_A )
_UpperCAmelCase = emb.weight.data
return lin_layer
def _UpperCamelCase ( _A , _A=None ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {}
for old_key in state_dict.keys():
_UpperCAmelCase = old_key
if "moe_layer.experts." in key:
if expert_idx is not None:
_UpperCAmelCase = key.replace("""moe_layer.experts.0""" , F"""ffn.experts.expert_{expert_idx}""" )
else:
_UpperCAmelCase = key.replace("""moe_layer.experts.""" , """ffn.experts.expert_""" )
if "gate" in key:
_UpperCAmelCase = key.replace(""".moe_layer.gate.wg""" , """.ffn.router.classifier""" )
if "fc2" and "experts" not in key:
_UpperCAmelCase = key.replace(""".fc2.""" , """.ffn.fc2.""" )
if "fc1" and "experts" not in key:
_UpperCAmelCase = key.replace(""".fc1.""" , """.ffn.fc1.""" )
if ".encoder_attn." in key:
_UpperCAmelCase = key.replace(""".encoder_attn.""" , """.cross_attention.""" )
if "encoder_attn_layer_norm" in key:
_UpperCAmelCase = key.replace("""encoder_attn_layer_norm""" , """cross_attention_layer_norm""" )
if "final_layer_norm" in key:
_UpperCAmelCase = key.replace("""final_layer_norm""" , """ff_layer_norm""" )
_UpperCAmelCase = state_dict[old_key]
return new_dict
def _UpperCamelCase ( _A , _A , _A , _A , _A = WEIGHTS_NAME ) -> str:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = 0
os.makedirs(_A , exist_ok=_A )
for expert in range(_A ):
_UpperCAmelCase = switch_checkpoint_path + F"""-rank-{expert}.pt"""
if os.path.isfile(_A ):
_UpperCAmelCase = torch.load(_A )["""model"""]
remove_ignore_keys_(_A )
_UpperCAmelCase = rename_fairseq_keys(_A , _A )
_UpperCAmelCase = os.path.join(
_A , weights_name.replace(""".bin""" , F"""-{len(_A )+1:05d}-of-???.bin""" ) )
torch.save(_A , _A )
sharded_state_dicts.append(expert_state.keys() )
total_size += sum([value.numel() for key, value in expert_state.items()] ) * dtype_byte_size(
expert_state[list(_A )[0]].dtype )
# Add the last block
_UpperCAmelCase = os.path.join(_A , weights_name.replace(""".bin""" , F"""-{len(_A )+1:05d}-of-???.bin""" ) )
_UpperCAmelCase = torch.load(switch_checkpoint_path + """-shared.pt""" )["""model"""]
remove_ignore_keys_(_A )
_UpperCAmelCase = rename_fairseq_keys(_A , _A )
_UpperCAmelCase = shared_weights["""decoder.embed_tokens.weight"""]
sharded_state_dicts.append(shared_weights.keys() )
# If we only have the shared weights (dummy model/experts saved on the same file)
if len(_A ) == 1:
_UpperCAmelCase = os.path.join(_A , _A )
torch.save(_A , _A )
return {weights_name: sharded_state_dicts[0]}, None
else:
torch.save(_A , _A )
# Otherwise, let's build the index
_UpperCAmelCase = {}
for idx, shard in enumerate(_A ):
_UpperCAmelCase = weights_name.replace(""".bin""" , F"""-{idx+1:05d}-of-{len(_A ):05d}.bin""" )
_UpperCAmelCase = os.path.join(_A , weights_name.replace(""".bin""" , F"""-{idx+1:05d}-of-???.bin""" ) )
os.rename(_A , os.path.join(_A , _A ) )
for key in shard:
_UpperCAmelCase = shard_file
# Add the metadata
_UpperCAmelCase = {"""total_size""": total_size}
_UpperCAmelCase = {"""metadata""": metadata, """weight_map""": weight_map}
with open(os.path.join(_A , _A ) , """w""" , encoding="""utf-8""" ) as f:
_UpperCAmelCase = json.dumps(_A , indent=2 , sort_keys=_A ) + """\n"""
f.write(_A )
return metadata, index
if __name__ == "__main__":
a : Optional[Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--nllb_moe_checkpoint_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/model_moe_54b/checkpoint_2_300000''',
type=str,
required=False,
help='''Path to a directory containing a folder per layer. Follows the original Google format.''',
)
parser.add_argument('''--dtype''', default='''float32''', type=str, required=False, help='''dtype of the saved model''')
parser.add_argument(
'''--pytorch_dump_folder_path''',
default='''/home/arthur_huggingface_co/fairseq/weights/checkpoints/hf-converted-moe-54b''',
type=str,
required=False,
help='''Path to the output pytorch model.''',
)
a : Optional[int] = parser.parse_args()
a : Optional[Any] = shard_on_the_fly(
args.nllb_moe_checkpoint_path,
args.pytorch_dump_folder_path,
1_2_8,
args.dtype,
)
a : Union[str, Any] = NllbMoeConfig.from_pretrained(
'''facebook/nllb-200-3.3B''', encoder_sparse_step=4, decoder_sparse_step=4, num_experts=1_2_8
)
config.save_pretrained(args.pytorch_dump_folder_path)
a : Union[str, Any] = NllbMoeModel.from_pretrained(args.pytorch_dump_folder_path)
print('''Done''')
model.save_pretrained(args.pytorch_dump_folder_path) | 720 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Any = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : List[Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=[1, 10, 1_00] , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple=3.0 ) ->Union[str, Any]:
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase ,_UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
def estimator(_A , _A , _A ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_A , _A ):
_UpperCAmelCase = itertools.repeat(_A , len(_A ) )
else:
assert len(_A ) == len(_A )
_UpperCAmelCase = iter(_A )
return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] ) | 19 | 0 |
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
is_valid_image,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
a : Union[str, Any] = logging.get_logger(__name__)
def _UpperCamelCase ( _A ) -> List[List[ImageInput]]:
"""simple docstring"""
    if isinstance(_A , (list, tuple) ) and isinstance(_A[0] , (list, tuple) ) and is_valid_image(_A[0][0] ):
        return _A
    elif isinstance(_A , (list, tuple) ) and is_valid_image(_A[0] ):
        return [_A]
    elif is_valid_image(_A ):
        return [[_A]]
    raise ValueError(F"""Could not make batched video from {_A}""" )
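# Usage sketch (hypothetical frames; in the un-obfuscated source this helper is
# typically named `make_batched`). It normalizes any accepted input shape:
#   _UpperCamelCase(frame)              # -> [[frame]]        single image
#   _UpperCamelCase([f0, f1])           # -> [[f0, f1]]       one video
#   _UpperCamelCase([[f0, f1], [g0]])   # -> input unchanged  already a batch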
class a_ ( _UpperCAmelCase ):
a : List[str] = ['pixel_values']
def __init__( self : Dict , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_55 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : Optional[int] , ) ->None:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {"""shortest_edge""": 2_24}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
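        # Store the preprocessing configuration; each flag below can be overridden
        # per call when the processor is invoked.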
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = resample
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[Any] , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
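        # A {"shortest_edge": n} size keeps the aspect ratio and resizes the shorter
        # side to n; a {"height": h, "width": w} size forces an exact output shape.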
if "shortest_edge" in size:
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size["""shortest_edge"""] , default_to_square=__UpperCamelCase )
elif "height" in size and "width" in size:
_UpperCAmelCase = (size["""height"""], size["""width"""])
else:
raise ValueError(f"""Size must have 'height' and 'width' or 'shortest_edge' as keys. Got {size.keys()}""" )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""Size must have 'height' and 'width' as keys. Got {size.keys()}""" )
return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[int, float] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Any , ) ->int:
'''simple docstring'''
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[Any] , ) ->np.ndarray:
'''simple docstring'''
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Any , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[ChannelDimension] = ChannelDimension.FIRST , ) ->np.ndarray:
'''simple docstring'''
if do_resize and size is None or resample is None:
raise ValueError("""Size and resample must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase = to_numpy_array(__UpperCamelCase )
if do_resize:
_UpperCAmelCase = self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase )
if do_center_crop:
_UpperCAmelCase = self.center_crop(__UpperCamelCase , size=__UpperCamelCase )
if do_rescale:
_UpperCAmelCase = self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase )
if do_normalize:
_UpperCAmelCase = self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase )
_UpperCAmelCase = to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase )
return image
def _snake_case ( self : Optional[int] , __UpperCamelCase : ImageInput , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = None , __UpperCamelCase : float = None , __UpperCamelCase : bool = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : ChannelDimension = ChannelDimension.FIRST , **__UpperCamelCase : Optional[Any] , ) ->PIL.Image.Image:
'''simple docstring'''
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
_UpperCAmelCase = make_batched(__UpperCamelCase )
_UpperCAmelCase = [
[
self._preprocess_image(
image=__UpperCamelCase , do_resize=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , do_center_crop=__UpperCamelCase , crop_size=__UpperCamelCase , do_rescale=__UpperCamelCase , rescale_factor=__UpperCamelCase , do_normalize=__UpperCamelCase , image_mean=__UpperCamelCase , image_std=__UpperCamelCase , data_format=__UpperCamelCase , )
for img in video
]
for video in videos
]
_UpperCAmelCase = {"""pixel_values""": videos}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase ) | 721 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 19 | 0 |
"""simple docstring"""
import unittest
from transformers import MobileBertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
MobileBertModel,
)
class a_ :
def __init__( self : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : Dict=7 , __UpperCamelCase : str=True , __UpperCamelCase : int=True , __UpperCamelCase : int=True , __UpperCamelCase : str=True , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : int=64 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Tuple=5 , __UpperCamelCase : Tuple=4 , __UpperCamelCase : List[Any]=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Dict=5_12 , __UpperCamelCase : Union[str, Any]=16 , __UpperCamelCase : int=2 , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : List[str]=3 , __UpperCamelCase : int=4 , __UpperCamelCase : List[str]=None , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = embedding_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = scope
def _snake_case ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _snake_case ( self : Any ) ->Dict:
'''simple docstring'''
return MobileBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , embedding_size=self.embedding_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
def _snake_case ( self : str , __UpperCamelCase : str , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Tuple , __UpperCamelCase : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = MobileBertModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = MobileBertForMaskedLM(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Dict , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = MobileBertForNextSentencePrediction(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, 2) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = MobileBertForPreTraining(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , next_sentence_label=__UpperCamelCase , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.seq_relationship_logits.shape , (self.batch_size, 2) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = MobileBertForQuestionAnswering(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : int , __UpperCamelCase : Any , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileBertForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def _snake_case ( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = MobileBertForTokenClassification(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Any ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = MobileBertForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self : int ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Optional[int] = (
(
MobileBertModel,
MobileBertForMaskedLM,
MobileBertForMultipleChoice,
MobileBertForNextSentencePrediction,
MobileBertForPreTraining,
MobileBertForQuestionAnswering,
MobileBertForSequenceClassification,
MobileBertForTokenClassification,
)
if is_torch_available()
else ()
)
a : Any = (
{
'feature-extraction': MobileBertModel,
'fill-mask': MobileBertForMaskedLM,
'question-answering': MobileBertForQuestionAnswering,
'text-classification': MobileBertForSequenceClassification,
'token-classification': MobileBertForTokenClassification,
'zero-shot': MobileBertForSequenceClassification,
}
if is_torch_available()
else {}
)
a : Optional[int] = True
def _snake_case ( self : int , __UpperCamelCase : Any , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any]=False ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class in get_values(__UpperCamelCase ):
_UpperCAmelCase = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=__UpperCamelCase )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
return inputs_dict
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = MobileBertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_model(*__UpperCamelCase )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_masked_lm(*__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_multiple_choice(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_next_sequence_prediction(*__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_pretraining(*__UpperCamelCase )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_question_answering(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_sequence_classification(*__UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_mobilebert_for_token_classification(*__UpperCamelCase )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
return torch.tensor(
_A , dtype=torch.long , device=_A , )
a : str = 1E-3
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase = MobileBertModel.from_pretrained("""google/mobilebert-uncased""" ).to(__UpperCamelCase )
_UpperCAmelCase = _long_tensor([[1_01, 71_10, 10_05, 10_56, 20_23, 1_13_33, 1_74_13, 10_29, 1_02]] )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = torch.Size((1, 9, 5_12) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[
[
[-2.4736526e07, 8.2691656e04, 1.6521838e05],
[-5.7541704e-01, 3.9056022e00, 4.4011507e00],
[2.6047359e00, 1.5677652e00, -1.7324188e-01],
]
] , device=__UpperCamelCase , )
# MobileBERT results range from 10e0 to 10e8. Even a 0.0000001% difference with a value of 10e8 results in a
# ~1 difference, it's therefore not a good idea to measure using addition.
# Here, we instead divide the expected result with the result in order to obtain ~1. We then check that the
# result is held between bounds: 1 - TOLERANCE < expected_result / result < 1 + TOLERANCE
_UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) >= 1 - TOLERANCE )
_UpperCAmelCase = torch.all((expected_slice / output[..., :3, :3]) <= 1 + TOLERANCE )
self.assertTrue(lower_bound and upper_bound ) | 700 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
main() | 19 | 0 |
"""simple docstring"""
import unittest
from transformers import (
MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
Pipeline,
ZeroShotClassificationPipeline,
pipeline,
)
from transformers.testing_utils import is_pipeline_test, nested_simplify, require_tf, require_torch, slow
from .test_pipelines_common import ANY
# These 2 model types require different inputs than those of the usual text models.
a : Union[str, Any] = {'''LayoutLMv2Config''', '''LayoutLMv3Config'''}
@is_pipeline_test
class a_ ( unittest.TestCase ):
a : List[Any] = MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
a : str = TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING
if model_mapping is not None:
a : List[Any] = {config: model for config, model in model_mapping.items() if config.__name__ not in _TO_SKIP}
if tf_model_mapping is not None:
a : List[str] = {
config: model for config, model in tf_model_mapping.items() if config.__name__ not in _TO_SKIP
}
def _snake_case ( self : List[str] , __UpperCamelCase : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ZeroShotClassificationPipeline(
model=__UpperCamelCase , tokenizer=__UpperCamelCase , candidate_labels=["""polics""", """health"""] )
return classifier, ["Who are you voting for in 2020?", "My stomach hurts."]
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics""" )
self.assertEqual(__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase )]} )
# No kwarg
_UpperCAmelCase = classifier("""Who are you voting for in 2020?""" , ["""politics"""] )
self.assertEqual(__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase )]} )
_UpperCAmelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics"""] )
self.assertEqual(__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase )]} )
_UpperCAmelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels="""politics, public health""" )
self.assertEqual(
__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
_UpperCAmelCase = classifier("""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health"""] )
self.assertEqual(
__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]} )
self.assertAlmostEqual(sum(nested_simplify(outputs["""scores"""] ) ) , 1.0 )
_UpperCAmelCase = classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""This text is about {}""" )
self.assertEqual(__UpperCamelCase , {"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase )]} )
# https://github.com/huggingface/transformers/issues/13846
_UpperCAmelCase = classifier(["""I am happy"""] , ["""positive""", """negative"""] )
self.assertEqual(
__UpperCamelCase , [
{"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]}
for i in range(1 )
] , )
_UpperCAmelCase = classifier(["""I am happy""", """I am sad"""] , ["""positive""", """negative"""] )
self.assertEqual(
__UpperCamelCase , [
{"""sequence""": ANY(__UpperCamelCase ), """labels""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )], """scores""": [ANY(__UpperCamelCase ), ANY(__UpperCamelCase )]}
for i in range(2 )
] , )
with self.assertRaises(__UpperCamelCase ):
classifier("""""" , candidate_labels="""politics""" )
with self.assertRaises(__UpperCamelCase ):
classifier(__UpperCamelCase , candidate_labels="""politics""" )
with self.assertRaises(__UpperCamelCase ):
classifier("""Who are you voting for in 2020?""" , candidate_labels="""""" )
with self.assertRaises(__UpperCamelCase ):
classifier("""Who are you voting for in 2020?""" , candidate_labels=__UpperCamelCase )
with self.assertRaises(__UpperCamelCase ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template="""Not formatting template""" , )
with self.assertRaises(__UpperCamelCase ):
classifier(
"""Who are you voting for in 2020?""" , candidate_labels="""politics""" , hypothesis_template=__UpperCamelCase , )
self.run_entailment_id(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : Pipeline ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = zero_shot_classifier.model.config
_UpperCAmelCase = config.labelaid
_UpperCAmelCase = zero_shot_classifier.entailment_id
_UpperCAmelCase = {"""LABEL_0""": 0, """LABEL_1""": 1, """LABEL_2""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , -1 )
_UpperCAmelCase = {"""entailment""": 0, """neutral""": 1, """contradiction""": 2}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {"""ENTAIL""": 0, """NON-ENTAIL""": 1}
self.assertEqual(zero_shot_classifier.entailment_id , 0 )
_UpperCAmelCase = {"""ENTAIL""": 2, """NEUTRAL""": 1, """CONTR""": 0}
self.assertEqual(zero_shot_classifier.entailment_id , 2 )
_UpperCAmelCase = original_labelaid
self.assertEqual(__UpperCamelCase , zero_shot_classifier.entailment_id )
@require_torch
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
# There was a regression in 4.10 for this
# Adding a test so we don't make the mistake again.
# https://github.com/huggingface/transformers/issues/13381#issuecomment-912343499
zero_shot_classifier(
"""Who are you voting for in 2020?""" * 1_00 , candidate_labels=["""politics""", """public health""", """science"""] )
@require_torch
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""pt""" , )
_UpperCAmelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@require_tf
def _snake_case ( self : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase = pipeline(
"""zero-shot-classification""" , model="""sshleifer/tiny-distilbert-base-cased-distilled-squad""" , framework="""tf""" , )
_UpperCAmelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""science""", """public health""", """politics"""],
"""scores""": [0.3_3_3, 0.3_3_3, 0.3_3_3],
} , )
@slow
@require_torch
def _snake_case ( self : Optional[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""pt""" )
_UpperCAmelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
_UpperCAmelCase = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__UpperCamelCase , )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , )
@slow
@require_tf
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pipeline("""zero-shot-classification""" , model="""roberta-large-mnli""" , framework="""tf""" )
_UpperCAmelCase = zero_shot_classifier(
"""Who are you voting for in 2020?""" , candidate_labels=["""politics""", """public health""", """science"""] )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": """Who are you voting for in 2020?""",
"""labels""": ["""politics""", """public health""", """science"""],
"""scores""": [0.9_7_6, 0.0_1_5, 0.0_0_9],
} , )
_UpperCAmelCase = zero_shot_classifier(
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural networks"""
""" in an encoder-decoder configuration. The best performing models also connect the encoder and decoder"""
""" through an attention mechanism. We propose a new simple network architecture, the Transformer, based"""
""" solely on attention mechanisms, dispensing with recurrence and convolutions entirely. Experiments on two"""
""" machine translation tasks show these models to be superior in quality while being more parallelizable"""
""" and requiring significantly less time to train. Our model achieves 28.4 BLEU on the WMT 2014"""
""" English-to-German translation task, improving over the existing best results, including ensembles by"""
""" over 2 BLEU. On the WMT 2014 English-to-French translation task, our model establishes a new"""
""" single-model state-of-the-art BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small"""
""" fraction of the training costs of the best models from the literature. We show that the Transformer"""
""" generalizes well to other tasks by applying it successfully to English constituency parsing both with"""
""" large and limited training data.""" , candidate_labels=["""machine learning""", """statistics""", """translation""", """vision"""] , multi_label=__UpperCamelCase , )
self.assertEqual(
nested_simplify(__UpperCamelCase ) , {
"""sequence""": (
"""The dominant sequence transduction models are based on complex recurrent or convolutional neural"""
""" networks in an encoder-decoder configuration. The best performing models also connect the"""
""" encoder and decoder through an attention mechanism. We propose a new simple network"""
""" architecture, the Transformer, based solely on attention mechanisms, dispensing with recurrence"""
""" and convolutions entirely. Experiments on two machine translation tasks show these models to be"""
""" superior in quality while being more parallelizable and requiring significantly less time to"""
""" train. Our model achieves 28.4 BLEU on the WMT 2014 English-to-German translation task,"""
""" improving over the existing best results, including ensembles by over 2 BLEU. On the WMT 2014"""
""" English-to-French translation task, our model establishes a new single-model state-of-the-art"""
""" BLEU score of 41.8 after training for 3.5 days on eight GPUs, a small fraction of the training"""
""" costs of the best models from the literature. We show that the Transformer generalizes well to"""
""" other tasks by applying it successfully to English constituency parsing both with large and"""
""" limited training data."""
),
"""labels""": ["""translation""", """machine learning""", """vision""", """statistics"""],
"""scores""": [0.8_1_7, 0.7_1_3, 0.0_1_8, 0.0_1_8],
} , ) | 701 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = requests.get(_A , headers=_A , allow_redirects=_A )
_UpperCAmelCase = result.headers["""Location"""]
_UpperCAmelCase = requests.get(_A , allow_redirects=_A )
_UpperCAmelCase = os.path.join(_A , F"""{artifact_name}.zip""" )
with open(_A , """wb""" ) as fp:
fp.write(response.content )
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
_UpperCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(""": """ )]
_UpperCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase = line[len("""FAILED """ ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
_UpperCAmelCase = line
if len(_A ) != len(_A ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` """
F"""and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
_UpperCAmelCase = None
if job_name and job_links:
_UpperCAmelCase = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_UpperCAmelCase = test.split("""/""" )[2]
else:
_UpperCAmelCase = None
return test
def _UpperCamelCase ( _A , _A=None ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase = [x for x in logs if x[2] is not None]
_UpperCAmelCase = {x[2] for x in logs}
_UpperCAmelCase = {}
for test in tests:
_UpperCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase = {"""count""": n_errors, """errors""": error_counts}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """| no. | error | status |"""
_UpperCAmelCase = """|-:|:-|:-|"""
_UpperCAmelCase = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase = reduced_by_error[error]["""count"""]
_UpperCAmelCase = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(_A )
return "\n".join(_A )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """| model | no. of errors | major error | count |"""
_UpperCAmelCase = """|-:|-:|-:|-:|"""
_UpperCAmelCase = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase = reduced_by_model[model]["""count"""]
_UpperCAmelCase ,_UpperCAmelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
_UpperCAmelCase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(_A )
return "\n".join(_A )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
a : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a : Tuple = get_job_links(args.workflow_run_id, token=args.token)
a : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a : List[Any] = k.find(''' / ''')
a : Tuple = k[index + len(''' / ''') :]
a : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a : Optional[int] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a : int = reduce_by_error(errors)
a : str = reduce_by_model(errors)
a : int = make_github_table(reduced_by_error)
a : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa) | 19 | 0 |
"""simple docstring"""
import argparse
import torch
from transformers import BertForMaskedLM
if __name__ == "__main__":
a : str = argparse.ArgumentParser(
description=(
'''Extraction some layers of the full BertForMaskedLM or RObertaForMaskedLM for Transfer Learned'''
''' Distillation'''
)
)
parser.add_argument('''--model_type''', default='''bert''', choices=['''bert'''])
parser.add_argument('''--model_name''', default='''bert-base-uncased''', type=str)
parser.add_argument('''--dump_checkpoint''', default='''serialization_dir/tf_bert-base-uncased_0247911.pth''', type=str)
parser.add_argument('''--vocab_transform''', action='''store_true''')
a : str = parser.parse_args()
if args.model_type == "bert":
a : Any = BertForMaskedLM.from_pretrained(args.model_name)
a : Tuple = '''bert'''
else:
raise ValueError('''args.model_type should be "bert".''')
a : Optional[Any] = model.state_dict()
a : Dict = {}
for w in ["word_embeddings", "position_embeddings"]:
a : List[str] = state_dict[F"{prefix}.embeddings.{w}.weight"]
for w in ["weight", "bias"]:
a : List[Any] = state_dict[F"{prefix}.embeddings.LayerNorm.{w}"]
a : Dict = 0
for teacher_idx in [0, 2, 4, 7, 9, 1_1]:
for w in ["weight", "bias"]:
a : List[str] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.query.{w}"
]
a : Optional[int] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.key.{w}"
]
a : int = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.self.value.{w}"
]
a : Union[str, Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.dense.{w}"
]
a : str = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.attention.output.LayerNorm.{w}"
]
a : Optional[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.intermediate.dense.{w}"
]
a : str = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.dense.{w}"
]
a : List[Any] = state_dict[
F"{prefix}.encoder.layer.{teacher_idx}.output.LayerNorm.{w}"
]
std_idx += 1
a : Tuple = state_dict['''cls.predictions.decoder.weight''']
a : Optional[Any] = state_dict['''cls.predictions.bias''']
if args.vocab_transform:
for w in ["weight", "bias"]:
a : Optional[Any] = state_dict[F"cls.predictions.transform.dense.{w}"]
a : int = state_dict[F"cls.predictions.transform.LayerNorm.{w}"]
print(F"N layers selected for distillation: {std_idx}")
print(F"Number of params transferred for distillation: {len(compressed_sd.keys())}")
print(F"Save transferred checkpoint to {args.dump_checkpoint}.")
torch.save(compressed_sd, args.dump_checkpoint) | 702 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def _snake_case ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=None ) ->List[str]:
'''simple docstring'''
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(r"""<s_(.*?)>""" , __UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(rf"""</s_{key}>""" , __UpperCamelCase , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
_UpperCAmelCase = tokens.replace(__UpperCamelCase , """""" )
else:
_UpperCAmelCase = end_token.group()
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __UpperCamelCase , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_UpperCAmelCase = self.tokenajson(__UpperCamelCase , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if value:
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(r"""<sep/>""" ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCamelCase )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
_UpperCAmelCase = tokens[tokens.find(__UpperCamelCase ) + len(__UpperCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if len(__UpperCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
return self.image_processor | 19 | 0 |
"""simple docstring"""
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : int ) ->Tuple:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(__UpperCamelCase , __UpperCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def _snake_case ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__UpperCamelCase ): device for device in jax.devices()}
def _snake_case ( self : Dict , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , __UpperCamelCase ) and column:
if all(
isinstance(__UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__UpperCamelCase , axis=0 )
return column
def _snake_case ( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , (str, bytes, type(__UpperCamelCase )) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
else:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(__UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__UpperCamelCase , """__array__""" ) and not isinstance(__UpperCamelCase , jax.Array ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : dict ) ->int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
_UpperCAmelCase = self._consolidate(__UpperCamelCase )
return column
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_batch(__UpperCamelCase )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
return batch | 703 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( _A , _A , _A ) -> float:
"""simple docstring"""
_UpperCAmelCase = x
_UpperCAmelCase = y
for step in range(_A ): # noqa: B007
_UpperCAmelCase = a * a - b * b + x
_UpperCAmelCase = 2 * a * b + y
_UpperCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(_A , 1 , 1 ) )
def _UpperCamelCase ( _A = 8_0_0 , _A = 6_0_0 , _A = -0.6 , _A = 0 , _A = 3.2 , _A = 5_0 , _A = True , ) -> Image.Image:
"""simple docstring"""
_UpperCAmelCase = Image.new("""RGB""" , (image_width, image_height) )
_UpperCAmelCase = img.load()
# loop through the image-coordinates
for image_x in range(_A ):
for image_y in range(_A ):
# determine the figure-coordinates based on the image-coordinates
_UpperCAmelCase = figure_width / image_width * image_height
_UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
_UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
_UpperCAmelCase = get_distance(_A , _A , _A )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_UpperCAmelCase = get_color_coded_rgb(_A )
else:
_UpperCAmelCase = get_black_and_white_rgb(_A )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 19 | 0 |
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A , _A , _A ) -> tuple[float, list[float]]:
"""simple docstring"""
_UpperCAmelCase = list(range(len(_A ) ) )
_UpperCAmelCase = [v / w for v, w in zip(_A , _A )]
index.sort(key=lambda _A : ratio[i] , reverse=_A )
_UpperCAmelCase = 0
_UpperCAmelCase = [0] * len(_A )
for i in index:
if weight[i] <= capacity:
_UpperCAmelCase = 1
max_value += value[i]
capacity -= weight[i]
else:
_UpperCAmelCase = capacity / weight[i]
max_value += value[i] * capacity / weight[i]
break
return max_value, fractions
if __name__ == "__main__":
import doctest
doctest.testmod() | 704 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
def __init__( self : List[str] , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 88 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 32 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "geglu" , __UpperCamelCase : Optional[int] = None , ) ->Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__UpperCamelCase , attention_head_dim=__UpperCamelCase , in_channels=__UpperCamelCase , num_layers=__UpperCamelCase , dropout=__UpperCamelCase , norm_num_groups=__UpperCamelCase , cross_attention_dim=__UpperCamelCase , attention_bias=__UpperCamelCase , sample_size=__UpperCamelCase , num_vector_embeds=__UpperCamelCase , activation_fn=__UpperCamelCase , num_embeds_ada_norm=__UpperCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def _snake_case ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : bool = True , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , timestep=__UpperCamelCase , cross_attention_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__UpperCamelCase ) | 19 | 0 |
"""simple docstring"""
import os
from huggingface_hub.constants import HUGGINGFACE_HUB_CACHE, hf_cache_home
a : List[Any] = HUGGINGFACE_HUB_CACHE
a : Optional[int] = '''config.json'''
a : List[Any] = '''diffusion_pytorch_model.bin'''
a : Union[str, Any] = '''diffusion_flax_model.msgpack'''
a : Union[str, Any] = '''model.onnx'''
a : Optional[int] = '''diffusion_pytorch_model.safetensors'''
a : str = '''weights.pb'''
a : Dict = '''https://huggingface.co'''
a : Optional[Any] = default_cache_path
a : Dict = '''diffusers_modules'''
a : Optional[Any] = os.getenv('''HF_MODULES_CACHE''', os.path.join(hf_cache_home, '''modules'''))
a : str = ['''fp16''', '''non-ema''']
a : Optional[Any] = '''.self_attn''' | 705 |
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = LxmertConfig.from_json_file(_A )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase = LxmertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_A , _A , _A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _A )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 19 | 0 |
def _UpperCamelCase ( _A , _A , _A ) -> int:
"""simple docstring"""
def update_area_of_max_square(_A , _A ) -> int:
# BASE CASE
if row >= rows or col >= cols:
return 0
_UpperCAmelCase = update_area_of_max_square(_A , col + 1 )
_UpperCAmelCase = update_area_of_max_square(row + 1 , col + 1 )
_UpperCAmelCase = update_area_of_max_square(row + 1 , _A )
if mat[row][col]:
_UpperCAmelCase = 1 + min([right, diagonal, down] )
_UpperCAmelCase = max(largest_square_area[0] , _A )
return sub_problem_sol
else:
return 0
_UpperCAmelCase = [0]
update_area_of_max_square(0 , 0 )
return largest_square_area[0]
def _UpperCamelCase ( _A , _A , _A ) -> int:
"""simple docstring"""
def update_area_of_max_square_using_dp_array(
_A , _A , _A ) -> int:
if row >= rows or col >= cols:
return 0
if dp_array[row][col] != -1:
return dp_array[row][col]
_UpperCAmelCase = update_area_of_max_square_using_dp_array(_A , col + 1 , _A )
_UpperCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , col + 1 , _A )
_UpperCAmelCase = update_area_of_max_square_using_dp_array(row + 1 , _A , _A )
if mat[row][col]:
_UpperCAmelCase = 1 + min([right, diagonal, down] )
_UpperCAmelCase = max(largest_square_area[0] , _A )
_UpperCAmelCase = sub_problem_sol
return sub_problem_sol
else:
return 0
_UpperCAmelCase = [0]
_UpperCAmelCase = [[-1] * cols for _ in range(_A )]
update_area_of_max_square_using_dp_array(0 , 0 , _A )
return largest_square_area[0]
def _UpperCamelCase ( _A , _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = [[0] * (cols + 1) for _ in range(rows + 1 )]
_UpperCAmelCase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
_UpperCAmelCase = dp_array[row][col + 1]
_UpperCAmelCase = dp_array[row + 1][col + 1]
_UpperCAmelCase = dp_array[row + 1][col]
if mat[row][col] == 1:
_UpperCAmelCase = 1 + min(_A , _A , _A )
_UpperCAmelCase = max(dp_array[row][col] , _A )
else:
_UpperCAmelCase = 0
return largest_square_area
def _UpperCamelCase ( _A , _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = [0] * (cols + 1)
_UpperCAmelCase = [0] * (cols + 1)
_UpperCAmelCase = 0
for row in range(rows - 1 , -1 , -1 ):
for col in range(cols - 1 , -1 , -1 ):
_UpperCAmelCase = current_row[col + 1]
_UpperCAmelCase = next_row[col + 1]
_UpperCAmelCase = next_row[col]
if mat[row][col] == 1:
_UpperCAmelCase = 1 + min(_A , _A , _A )
_UpperCAmelCase = max(current_row[col] , _A )
else:
_UpperCAmelCase = 0
_UpperCAmelCase = current_row
return largest_square_area
if __name__ == "__main__":
import doctest
doctest.testmod()
print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])) | 706 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
a : str = '''examples/'''
a : List[str] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a : Tuple = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
a : List[str] = '''README.md'''
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase ,_UpperCAmelCase = REPLACE_PATTERNS[pattern]
_UpperCAmelCase = replace.replace("""VERSION""" , _A )
_UpperCAmelCase = re_pattern.sub(_A , _A )
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_A )
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern="""examples""" )
def _UpperCamelCase ( _A , _A=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
_UpperCAmelCase = """🤗 Transformers currently provides the following architectures"""
_UpperCAmelCase = """1. Want to contribute a new model?"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.readlines()
# Find the start of the list.
_UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_UpperCAmelCase = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_A )
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase = REPLACE_PATTERNS["""init"""][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def _UpperCamelCase ( _A=False ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_UpperCAmelCase = default_version.base_version
elif patch:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_UpperCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = default_version
print(F"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
def _UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
_UpperCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
_UpperCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(_A )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work() | 19 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import MobileBertTokenizer, MobileBertTokenizerFast
from transformers.models.bert.tokenization_bert import (
VOCAB_FILES_NAMES,
BasicTokenizer,
WordpieceTokenizer,
_is_control,
_is_punctuation,
_is_whitespace,
)
from transformers.testing_utils import require_tokenizers, slow
from ...test_tokenization_common import TokenizerTesterMixin, filter_non_english
@require_tokenizers
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : int = MobileBertTokenizer
a : Optional[int] = MobileBertTokenizerFast
a : Optional[Any] = True
a : Any = True
a : Union[str, Any] = filter_non_english
a : List[Any] = 'google/mobilebert-uncased'
def _snake_case ( self : str ) ->str:
'''simple docstring'''
super().setUp()
_UpperCAmelCase = [
"""[UNK]""",
"""[CLS]""",
"""[SEP]""",
"""[PAD]""",
"""[MASK]""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
_UpperCAmelCase = [
(tokenizer_def[0], self.pre_trained_model_path, tokenizer_def[2]) # else the 'google/' prefix is stripped
for tokenizer_def in self.tokenizers_list
]
def _snake_case ( self : str , __UpperCamelCase : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = """UNwant\u00E9d,running"""
_UpperCAmelCase = """unwanted, running"""
return input_text, output_text
def _snake_case ( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(__UpperCamelCase , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , [9, 6, 7, 12, 10, 11] )
def _snake_case ( self : Optional[Any] ) ->Any:
'''simple docstring'''
if not self.test_rust_tokenizer:
return
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = """UNwant\u00E9d,running"""
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
# With lower casing
_UpperCAmelCase = self.get_tokenizer(do_lower_case=__UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer(do_lower_case=__UpperCamelCase )
_UpperCAmelCase = """UNwant\u00E9d,running"""
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase = tokenizer.encode(__UpperCamelCase )
_UpperCAmelCase = rust_tokenizer.encode(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Tuple ) ->int:
'''simple docstring'''
_UpperCAmelCase = BasicTokenizer()
self.assertListEqual(tokenizer.tokenize("""ah\u535A\u63A8zz""" ) , ["""ah""", """\u535A""", """\u63A8""", """zz"""] )
def _snake_case ( self : Optional[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""hello""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hällo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""h\u00E9llo"""] )
def _snake_case ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def _snake_case ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""hallo""", """!""", """how""", """are""", """you""", """?"""] )
self.assertListEqual(tokenizer.tokenize("""H\u00E9llo""" ) , ["""hello"""] )
def _snake_case ( self : Optional[int] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? """ ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _snake_case ( self : Tuple ) ->int:
'''simple docstring'''
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HäLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , strip_accents=__UpperCamelCase )
self.assertListEqual(
tokenizer.tokenize(""" \tHäLLo!how \n Are yoU? """ ) , ["""HaLLo""", """!""", """how""", """Are""", """yoU""", """?"""] )
def _snake_case ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = BasicTokenizer(do_lower_case=__UpperCamelCase , never_split=["""[UNK]"""] )
self.assertListEqual(
tokenizer.tokenize(""" \tHeLLo!how \n Are yoU? [UNK]""" ) , ["""HeLLo""", """!""", """how""", """Are""", """yoU""", """?""", """[UNK]"""] )
def _snake_case ( self : int ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ["""[UNK]""", """[CLS]""", """[SEP]""", """want""", """##want""", """##ed""", """wa""", """un""", """runn""", """##ing"""]
_UpperCAmelCase = {}
for i, token in enumerate(__UpperCamelCase ):
_UpperCAmelCase = i
_UpperCAmelCase = WordpieceTokenizer(vocab=__UpperCamelCase , unk_token="""[UNK]""" )
self.assertListEqual(tokenizer.tokenize("""""" ) , [] )
self.assertListEqual(tokenizer.tokenize("""unwanted running""" ) , ["""un""", """##want""", """##ed""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.tokenize("""unwantedX running""" ) , ["""[UNK]""", """runn""", """##ing"""] )
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
self.assertTrue(_is_whitespace(""" """ ) )
self.assertTrue(_is_whitespace("""\t""" ) )
self.assertTrue(_is_whitespace("""\r""" ) )
self.assertTrue(_is_whitespace("""\n""" ) )
self.assertTrue(_is_whitespace("""\u00A0""" ) )
self.assertFalse(_is_whitespace("""A""" ) )
self.assertFalse(_is_whitespace("""-""" ) )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
self.assertTrue(_is_control("""\u0005""" ) )
self.assertFalse(_is_control("""A""" ) )
self.assertFalse(_is_control(""" """ ) )
self.assertFalse(_is_control("""\t""" ) )
self.assertFalse(_is_control("""\r""" ) )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
self.assertTrue(_is_punctuation("""-""" ) )
self.assertTrue(_is_punctuation("""$""" ) )
self.assertTrue(_is_punctuation("""`""" ) )
self.assertTrue(_is_punctuation(""".""" ) )
self.assertFalse(_is_punctuation("""A""" ) )
self.assertFalse(_is_punctuation(""" """ ) )
def _snake_case ( self : Optional[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase = self.get_rust_tokenizer()
# Example taken from the issue https://github.com/huggingface/tokenizers/issues/340
self.assertListEqual([tokenizer.tokenize(__UpperCamelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
self.assertListEqual(
[rust_tokenizer.tokenize(__UpperCamelCase ) for t in ["""Test""", """\xad""", """test"""]] , [["""[UNK]"""], [], ["""[UNK]"""]] )
@slow
def _snake_case ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.tokenizer_class.from_pretrained("""google/mobilebert-uncased""" )
_UpperCAmelCase = tokenizer.encode("""sequence builders""" , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer.encode("""multi-sequence build""" , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase )
_UpperCAmelCase = tokenizer.build_inputs_with_special_tokens(__UpperCamelCase , __UpperCamelCase )
assert encoded_sentence == [1_01] + text + [1_02]
assert encoded_pair == [1_01] + text + [1_02] + text_a + [1_02]
def _snake_case ( self : int ) ->Tuple:
'''simple docstring'''
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = f"""A, naïve {tokenizer_r.mask_token} AllenNLP sentence."""
_UpperCAmelCase = tokenizer_r.encode_plus(
__UpperCamelCase , return_attention_mask=__UpperCamelCase , return_token_type_ids=__UpperCamelCase , return_offsets_mapping=__UpperCamelCase , add_special_tokens=__UpperCamelCase , )
_UpperCAmelCase = tokenizer_r.do_lower_case if hasattr(__UpperCamelCase , """do_lower_case""" ) else False
_UpperCAmelCase = (
[
((0, 0), tokenizer_r.cls_token),
((0, 1), """A"""),
((1, 2), ""","""),
((3, 5), """na"""),
((5, 6), """##ï"""),
((6, 8), """##ve"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """Allen"""),
((21, 23), """##NL"""),
((23, 24), """##P"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
if not do_lower_case
else [
((0, 0), tokenizer_r.cls_token),
((0, 1), """a"""),
((1, 2), ""","""),
((3, 8), """naive"""),
((9, 15), tokenizer_r.mask_token),
((16, 21), """allen"""),
((21, 23), """##nl"""),
((23, 24), """##p"""),
((25, 33), """sentence"""),
((33, 34), """."""),
((0, 0), tokenizer_r.sep_token),
]
)
self.assertEqual(
[e[1] for e in expected_results] , tokenizer_r.convert_ids_to_tokens(tokens["""input_ids"""] ) )
self.assertEqual([e[0] for e in expected_results] , tokens["""offset_mapping"""] )
def _snake_case ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""的""", """人""", """有"""]
_UpperCAmelCase = """""".join(__UpperCamelCase )
for tokenizer, pretrained_name, kwargs in self.tokenizers_list:
with self.subTest(f"""{tokenizer.__class__.__name__} ({pretrained_name})""" ):
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = tokenizer_p.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer_r.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase )
_UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase )
# it is expected that each Chinese character is not preceded by "##"
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = False
_UpperCAmelCase = self.rust_tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = self.tokenizer_class.from_pretrained(__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = tokenizer_r.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer_p.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
_UpperCAmelCase = tokenizer_r.convert_ids_to_tokens(__UpperCamelCase )
_UpperCAmelCase = tokenizer_p.convert_ids_to_tokens(__UpperCamelCase )
# it is expected that only the first Chinese character is not preceded by "##".
_UpperCAmelCase = [
f"""##{token}""" if idx != 0 else token for idx, token in enumerate(__UpperCamelCase )
]
self.assertListEqual(__UpperCamelCase , __UpperCamelCase )
self.assertListEqual(__UpperCamelCase , __UpperCamelCase ) | 707 |
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A ) -> None:
"""simple docstring"""
create_state_space_tree(_A , [] , 0 , [0 for i in range(len(_A ) )] )
def _UpperCamelCase ( _A , _A , _A , _A , ) -> None:
"""simple docstring"""
if index == len(_A ):
print(_A )
return
for i in range(len(_A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_UpperCAmelCase = True
create_state_space_tree(_A , _A , index + 1 , _A )
current_sequence.pop()
_UpperCAmelCase = False
a : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a) | 19 | 0 |
"""simple docstring"""
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_albert import AlbertTokenizer
else:
a : Union[str, Any] = None
a : Optional[Any] = logging.get_logger(__name__)
a : List[Any] = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
a : Optional[Any] = {
'''vocab_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/spiece.model''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/spiece.model''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/spiece.model''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/spiece.model''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/spiece.model''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/spiece.model''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/spiece.model''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''albert-base-v1''': '''https://huggingface.co/albert-base-v1/resolve/main/tokenizer.json''',
'''albert-large-v1''': '''https://huggingface.co/albert-large-v1/resolve/main/tokenizer.json''',
'''albert-xlarge-v1''': '''https://huggingface.co/albert-xlarge-v1/resolve/main/tokenizer.json''',
'''albert-xxlarge-v1''': '''https://huggingface.co/albert-xxlarge-v1/resolve/main/tokenizer.json''',
'''albert-base-v2''': '''https://huggingface.co/albert-base-v2/resolve/main/tokenizer.json''',
'''albert-large-v2''': '''https://huggingface.co/albert-large-v2/resolve/main/tokenizer.json''',
'''albert-xlarge-v2''': '''https://huggingface.co/albert-xlarge-v2/resolve/main/tokenizer.json''',
'''albert-xxlarge-v2''': '''https://huggingface.co/albert-xxlarge-v2/resolve/main/tokenizer.json''',
},
}
a : Dict = {
'''albert-base-v1''': 5_1_2,
'''albert-large-v1''': 5_1_2,
'''albert-xlarge-v1''': 5_1_2,
'''albert-xxlarge-v1''': 5_1_2,
'''albert-base-v2''': 5_1_2,
'''albert-large-v2''': 5_1_2,
'''albert-xlarge-v2''': 5_1_2,
'''albert-xxlarge-v2''': 5_1_2,
}
a : Any = '''▁'''
class a_ ( _UpperCAmelCase ):
a : List[str] = VOCAB_FILES_NAMES
a : Tuple = PRETRAINED_VOCAB_FILES_MAP
a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Any = AlbertTokenizer
def __init__( self : int , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : int=None , __UpperCamelCase : Any=True , __UpperCamelCase : int=True , __UpperCamelCase : Dict=False , __UpperCamelCase : Union[str, Any]="[CLS]" , __UpperCamelCase : List[Any]="[SEP]" , __UpperCamelCase : Dict="<unk>" , __UpperCamelCase : Dict="[SEP]" , __UpperCamelCase : Optional[int]="<pad>" , __UpperCamelCase : Union[str, Any]="[CLS]" , __UpperCamelCase : Optional[int]="[MASK]" , **__UpperCamelCase : Optional[Any] , ) ->int:
'''simple docstring'''
_UpperCAmelCase = (
AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase , normalized=__UpperCamelCase )
if isinstance(__UpperCamelCase , __UpperCamelCase )
else mask_token
)
super().__init__(
__UpperCamelCase , tokenizer_file=__UpperCamelCase , do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , **__UpperCamelCase , )
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = False if not self.vocab_file else True
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _snake_case ( self : str , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _snake_case ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ):
copyfile(self.vocab_file , __UpperCamelCase )
return (out_vocab_file,) | 708 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Any=4 , __UpperCamelCase : Optional[int]=[0, 1, 2, 3] , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : int=[1, 3_84, 24, 24] , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=None , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = backbone_out_indices
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = scope
_UpperCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : int = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : str = False
a : List[str] = False
a : Dict = False
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = DPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = False
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
_UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = """add"""
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
_UpperCAmelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCamelCase , atol=1e-4 ) ) | 19 | 0 |
"""simple docstring"""
from ...utils import (
OptionalDependencyNotAvailable,
is_torch_available,
is_transformers_available,
is_transformers_version,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import (
ImageTextPipelineOutput,
UniDiffuserPipeline,
)
else:
from .modeling_text_decoder import UniDiffuserTextDecoder
from .modeling_uvit import UniDiffuserModel, UTransformeraDModel
from .pipeline_unidiffuser import ImageTextPipelineOutput, UniDiffuserPipeline | 709 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
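# `pipe(..., return_tensors=True)` selects ReturnType.TENSORS via `_sanitize_parameters` below;
# otherwise `postprocess` defaults to ReturnType.TEXT (decoded strings).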
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    """Pipeline for text-to-text generation using a seq2seq model."""

    # Used as the prefix of the return key, e.g. "generated_text" / "generated_token_ids".
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        """Split the caller's kwargs into preprocess / forward / postprocess parameter dicts."""
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
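    # Illustrative routing for a hypothetical call `pipe("text", truncation=True, max_length=64,
    # return_tensors=True)`: preprocess_params={"truncation": True}, forward_params={"max_length": 64},
    # postprocess_params={"return_type": ReturnType.TENSORS}.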
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether anything looks wrong with the given input lengths; subclasses may warn."""
        return True
def _snake_case ( self : Optional[Any] , *__UpperCamelCase : Any , __UpperCamelCase : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
_UpperCAmelCase = ([prefix + arg for arg in args[0]],)
_UpperCAmelCase = True
elif isinstance(args[0] , __UpperCamelCase ):
_UpperCAmelCase = (prefix + args[0],)
_UpperCAmelCase = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_UpperCAmelCase = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : str=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCamelCase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def _snake_case ( self : str , __UpperCamelCase : Dict , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
if self.framework == "pt":
_UpperCAmelCase ,_UpperCAmelCase = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
_UpperCAmelCase ,_UpperCAmelCase = tf.shape(model_inputs["""input_ids"""] ).numpy()
_UpperCAmelCase = generate_kwargs.get("""min_length""" , self.model.config.min_length )
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
_UpperCAmelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCAmelCase = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_UpperCAmelCase = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=ReturnType.TEXT , __UpperCamelCase : int=False ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCAmelCase = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_UpperCAmelCase = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
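# Usage sketch (added for illustration; not part of the original file): this
# base class backs the `text2text-generation` task, so a typical entry point
# looks like
#
#     from transformers import pipeline
#     generator = pipeline("text2text-generation", model="t5-small")
#     generator("translate English to German: Hello, world!")
#
# where the model id is only an example of a seq2seq checkpoint.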
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'summary'
def __call__( self : Optional[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : Optional[int] = 'translation'
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def _snake_case ( self : Tuple , *__UpperCamelCase : List[str] , __UpperCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=None ) ->Tuple:
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : int=None , __UpperCamelCase : int=None , **__UpperCamelCase : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
_UpperCAmelCase = src_lang
if tgt_lang is not None:
_UpperCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCAmelCase = kwargs.get("""task""" , self.task )
_UpperCAmelCase = task.split("""_""" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
_UpperCAmelCase = items[1]
_UpperCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->int:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase ) | 19 | 0 |
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...models import UNetaDModel
from ...schedulers import ScoreSdeVeScheduler
from ...utils import randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
class a_ ( _UpperCAmelCase ):
a : UNetaDModel
a : ScoreSdeVeScheduler
def __init__( self : Tuple , __UpperCamelCase : UNetaDModel , __UpperCamelCase : ScoreSdeVeScheduler ) ->str:
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self : Optional[int] , __UpperCamelCase : int = 1 , __UpperCamelCase : int = 20_00 , __UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase : Optional[str] = "pil" , __UpperCamelCase : bool = True , **__UpperCamelCase : Union[str, Any] , ) ->Union[ImagePipelineOutput, Tuple]:
_UpperCAmelCase = self.unet.config.sample_size
_UpperCAmelCase = (batch_size, 3, img_size, img_size)
_UpperCAmelCase = self.unet
_UpperCAmelCase = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase ) * self.scheduler.init_noise_sigma
_UpperCAmelCase = sample.to(self.device )
self.scheduler.set_timesteps(__UpperCamelCase )
self.scheduler.set_sigmas(__UpperCamelCase )
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
_UpperCAmelCase = self.scheduler.sigmas[i] * torch.ones(shape[0] , device=self.device )
# correction step
for _ in range(self.scheduler.config.correct_steps ):
_UpperCAmelCase = self.unet(__UpperCamelCase , __UpperCamelCase ).sample
_UpperCAmelCase = self.scheduler.step_correct(__UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# prediction step
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ).sample
_UpperCAmelCase = self.scheduler.step_pred(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase = output.prev_sample, output.prev_sample_mean
_UpperCAmelCase = sample_mean.clamp(0 , 1 )
_UpperCAmelCase = sample.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(__UpperCamelCase )
if not return_dict:
return (sample,)
return ImagePipelineOutput(images=__UpperCamelCase ) | 710 |
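# Minimal usage sketch (added for illustration; not part of the original
# file). The class above mirrors diffusers' ScoreSdeVePipeline, so under that
# assumption -- and with a compatible Hub checkpoint; the id below is an
# assumed example -- a run looks like:
if __name__ == "__main__":
    from diffusers import ScoreSdeVePipeline

    sde_ve = ScoreSdeVePipeline.from_pretrained("google/ncsnpp-church-256")  # assumed checkpoint id
    image = sde_ve(num_inference_steps=2_000).images[0]  # annealed Langevin sampling as implemented above
    image.save("sde_ve_generated_image.png")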
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
_UpperCAmelCase = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__UpperCamelCase )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 19 | 0 |
"""simple docstring"""
import os
import unicodedata
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import SPIECE_UNDERLINE, logging
a : str = logging.get_logger(__name__)
a : List[str] = {'''vocab_file''': '''spiece.model'''}
a : Optional[int] = {
'''vocab_file''': {
'''TsinghuaAI/CPM-Generate''': '''https://huggingface.co/TsinghuaAI/CPM-Generate/resolve/main/spiece.model''',
}
}
class a_ ( _UpperCAmelCase ):
def __init__( self : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : int=False , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Optional[Any]="<s>" , __UpperCamelCase : Union[str, Any]="</s>" , __UpperCamelCase : int="<unk>" , __UpperCamelCase : List[Any]="<sep>" , __UpperCamelCase : Optional[int]="<pad>" , __UpperCamelCase : List[str]="<cls>" , __UpperCamelCase : Optional[int]="<mask>" , __UpperCamelCase : List[Any]=["<eop>", "<eod>"] , __UpperCamelCase : Optional[Dict[str, Any]] = None , **__UpperCamelCase : Any , ) ->None:
'''simple docstring'''
_UpperCAmelCase = AddedToken(__UpperCamelCase , lstrip=__UpperCamelCase , rstrip=__UpperCamelCase ) if isinstance(__UpperCamelCase , __UpperCamelCase ) else mask_token
_UpperCAmelCase = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
do_lower_case=__UpperCamelCase , remove_space=__UpperCamelCase , keep_accents=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , unk_token=__UpperCamelCase , sep_token=__UpperCamelCase , pad_token=__UpperCamelCase , cls_token=__UpperCamelCase , mask_token=__UpperCamelCase , additional_special_tokens=__UpperCamelCase , sp_model_kwargs=self.sp_model_kwargs , **__UpperCamelCase , )
_UpperCAmelCase = 3
_UpperCAmelCase = do_lower_case
_UpperCAmelCase = remove_space
_UpperCAmelCase = keep_accents
_UpperCAmelCase = vocab_file
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(__UpperCamelCase )
try:
import jieba
except ModuleNotFoundError as error:
raise error.__class__(
"""You need to install jieba to use CpmTokenizer or CpmTokenizerFast. """
"""See https://pypi.org/project/jieba/ for installation.""" )
_UpperCAmelCase = jieba
_UpperCAmelCase = str.maketrans(""" \n""" , """\u2582\u2583""" )
@property
# Copied from transformers.models.xlnet.tokenization_xlnet.XLNetTokenizer.vocab_size
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
return len(self.sp_model )
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = {self.convert_ids_to_tokens(__UpperCamelCase ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.__dict__.copy()
_UpperCAmelCase = None
return state
def __setstate__( self : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = d
# for backward compatibility
if not hasattr(self , """sp_model_kwargs""" ):
_UpperCAmelCase = {}
_UpperCAmelCase = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
if self.remove_space:
_UpperCAmelCase = """ """.join(inputs.strip().split() )
else:
_UpperCAmelCase = inputs
_UpperCAmelCase = outputs.replace("""``""" , """\"""" ).replace("""''""" , """\"""" )
if not self.keep_accents:
_UpperCAmelCase = unicodedata.normalize("""NFKD""" , __UpperCamelCase )
_UpperCAmelCase = """""".join([c for c in outputs if not unicodedata.combining(__UpperCamelCase )] )
if self.do_lower_case:
_UpperCAmelCase = outputs.lower()
return outputs
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : str ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = self.preprocess_text(__UpperCamelCase )
_UpperCAmelCase = self.sp_model.encode(__UpperCamelCase , out_type=__UpperCamelCase )
_UpperCAmelCase = []
for piece in pieces:
if len(__UpperCamelCase ) > 1 and piece[-1] == str(""",""" ) and piece[-2].isdigit():
_UpperCAmelCase = self.sp_model.EncodeAsPieces(piece[:-1].replace(__UpperCamelCase , """""" ) )
if piece[0] != SPIECE_UNDERLINE and cur_pieces[0][0] == SPIECE_UNDERLINE:
if len(cur_pieces[0] ) == 1:
_UpperCAmelCase = cur_pieces[1:]
else:
_UpperCAmelCase = cur_pieces[0][1:]
cur_pieces.append(piece[-1] )
new_pieces.extend(__UpperCamelCase )
else:
new_pieces.append(__UpperCamelCase )
return new_pieces
def _snake_case ( self : int , __UpperCamelCase : str ) ->str:
'''simple docstring'''
return self.sp_model.PieceToId(__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
return self.sp_model.IdToPiece(__UpperCamelCase )
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = """""".join(__UpperCamelCase ).replace(__UpperCamelCase , """ """ ).strip()
return out_string
def _snake_case ( self : int , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def _snake_case ( self : Optional[Any] , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None , __UpperCamelCase : bool = False ) ->List[int]:
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=__UpperCamelCase , token_ids_a=__UpperCamelCase , already_has_special_tokens=__UpperCamelCase )
if token_ids_a is not None:
return ([0] * len(__UpperCamelCase )) + [1] + ([0] * len(__UpperCamelCase )) + [1, 1]
return ([0] * len(__UpperCamelCase )) + [1, 1]
def _snake_case ( self : Any , __UpperCamelCase : List[int] , __UpperCamelCase : Optional[List[int]] = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def _snake_case ( self : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(__UpperCamelCase ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file , __UpperCamelCase )
elif not os.path.isfile(self.vocab_file ):
with open(__UpperCamelCase , """wb""" ) as fi:
_UpperCAmelCase = self.sp_model.serialized_model_proto()
fi.write(__UpperCamelCase )
return (out_vocab_file,)
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = super()._decode(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = text.replace(""" """ , """""" ).replace("""\u2582""" , """ """ ).replace("""\u2583""" , """\n""" )
return text | 711 |
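# Illustration (added; not part of the original file) of the whitespace
# round-trip used by this tokenizer: jieba-segmented text stores spaces and
# newlines as U+2582/U+2583 so the sentencepiece model never sees raw
# whitespace, and `_decode` above restores them.
if __name__ == "__main__":
    translator = str.maketrans(" \n", "\u2582\u2583")
    encoded = "你好 世界\n".translate(translator)
    assert encoded == "你好\u2582世界\u2583"
    assert encoded.replace("\u2582", " ").replace("\u2583", "\n") == "你好 世界\n"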
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a : str = True
except (ImportError, ModuleNotFoundError):
a : List[str] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
re.sub("""<n>""" , """""" , _A ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_A ) ) | 19 | 0 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase ( _A , _A=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
    find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.1'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def _UpperCamelCase ( *_A ) -> Dict:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("""test""" ):
for decorator in decorators:
_UpperCAmelCase = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
class a_ ( _UpperCAmelCase ):
'''simple docstring'''
pass
class a_ ( _UpperCAmelCase ):
'''simple docstring'''
a : Any = 0
a : Optional[Any] = 1
a : int = 2
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(_A )
yield
finally:
os.chdir(_A )
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
'''simple docstring'''
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(_A )
if not quiet:
print(_A , _A , file=_A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
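# Usage sketch (added for illustration; not part of the original file): callers
# pass an argv-style list and get a _RunOutput back, e.g.
#
#     result = execute_subprocess_async(["python", "-c", "print('hi')"], env=os.environ.copy())
#     assert result.returncode == 0 and "hi" in result.stdout[0]
#
# (`execute_subprocess_async` is the upstream name of the wrapper above; stdout
# and stderr are captured as lists of rstripped lines.)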
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , _A , 0 , re.M )
return int(_A )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta | 712 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a_ :
a : List[Any] = PegasusConfig
a : Dict = {}
a : List[Any] = 'gelu'
def __init__( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any=False , __UpperCamelCase : Any=99 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[Any]=20 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Tuple=0 , ) ->int:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _snake_case ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _UpperCamelCase ( _A , _A , _A , _A=None , _A=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(_A , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a : Any = True
a : int = False
a : Union[str, Any] = False
a : Optional[int] = False
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : List[Any] , __UpperCamelCase : str=None , **__UpperCamelCase : int ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=5_12 , padding=__UpperCamelCase )
_UpperCAmelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded | 19 | 0 |
"""simple docstring"""
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = [1]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = 0, 0, 0
_UpperCAmelCase = ugly_nums[ia] * 2
_UpperCAmelCase = ugly_nums[ia] * 3
_UpperCAmelCase = ugly_nums[ia] * 5
for _ in range(1 , _A ):
_UpperCAmelCase = min(_A , _A , _A )
ugly_nums.append(_A )
if next_num == next_a:
ia += 1
_UpperCAmelCase = ugly_nums[ia] * 2
if next_num == next_a:
ia += 1
_UpperCAmelCase = ugly_nums[ia] * 3
if next_num == next_a:
ia += 1
_UpperCAmelCase = ugly_nums[ia] * 5
return ugly_nums[-1]
if __name__ == "__main__":
from doctest import testmod
testmod(verbose=True)
print(F"{ugly_numbers(2_0_0) = }") | 713 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : List[str]=7 , __UpperCamelCase : List[Any]=9 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : int=False , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : List[Any]=8 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=0.0_0_2 , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = encoder_seq_length
_UpperCAmelCase = decoder_seq_length
# For common tests
_UpperCAmelCase = self.decoder_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = d_ff
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = None
_UpperCAmelCase = decoder_layers
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : str=None , ) ->int:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
_UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_length and which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_UpperCAmelCase ,_UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = model(__UpperCamelCase )["""last_hidden_state"""]
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_UpperCAmelCase = model(**__UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a : Any = True
a : Optional[int] = False
a : Any = False
a : Optional[int] = True
a : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a : int = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(__UpperCamelCase , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_UpperCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_UpperCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
                [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids.to(__UpperCamelCase ) )
_UpperCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase ) | 19 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : int = {'''vocab_file''': '''vocab.json'''}
a : Any = {
'''vocab_file''': {
'''mgp-str''': '''https://huggingface.co/alibaba-damo/mgp-str-base/blob/main/vocab.json''',
}
}
a : List[str] = {'''mgp-str''': 2_7}
class a_ ( _UpperCAmelCase ):
a : Union[str, Any] = VOCAB_FILES_NAMES
a : str = PRETRAINED_VOCAB_FILES_MAP
a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
def __init__( self : int , __UpperCamelCase : Dict , __UpperCamelCase : Dict="[GO]" , __UpperCamelCase : int="[GO]" , __UpperCamelCase : Union[str, Any]="[s]" , __UpperCamelCase : List[str]="[GO]" , **__UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
super().__init__(
unk_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , pad_token=__UpperCamelCase , **__UpperCamelCase , )
with open(__UpperCamelCase , encoding="""utf-8""" ) as vocab_handle:
_UpperCAmelCase = json.load(__UpperCamelCase )
_UpperCAmelCase = {v: k for k, v in self.vocab.items()}
@property
def _snake_case ( self : int ) ->List[Any]:
'''simple docstring'''
return len(self.vocab )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
return dict(self.vocab , **self.added_tokens_encoder )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase = []
for s in text:
char_tokens.extend(__UpperCamelCase )
return char_tokens
def _snake_case ( self : int , __UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
return self.vocab.get(__UpperCamelCase , self.vocab.get(self.unk_token ) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Any ) ->str:
'''simple docstring'''
return self.decoder.get(__UpperCamelCase )
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error("""Vocabulary path ({}) should be a directory""".format(__UpperCamelCase ) )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.vocab , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + """\n""" )
return (vocab_file,) | 714 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCamelCase ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
_UpperCAmelCase ,_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __UpperCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(_A ) if isinstance(_A , pa.Buffer ) else pa.memory_map(_A )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=_A , features=_A ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_A )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_UpperCAmelCase = os.path.join(_A , """test.arrow""" )
with ArrowWriter(path=_A , schema=pa.schema(_A ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(_A , 1 )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if pa.types.is_list(_A ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
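# The helper above is referenced below as get_base_dtype; it unwraps nested list types
# recursively, e.g. get_base_dtype(pa.list_(pa.list_(pa.int64()))) -> pa.int64().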
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
if isinstance(lst[0] , _A ):
change_first_primitive_element_in_list(lst[0] , _A )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
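        # i.e. once a value exceeds the optimized dtype's range (np.iinfo(...).max + 1 above),
        # the column silently falls back to 64-bit integer storage.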
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_A , format="""png""" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_A , features=Features({"""image""": Image()} ) , embed_local_files=_A ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _A )
with open(_A , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_A )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=_A ) as writer:
writer._build_writer(inferred_schema=_A )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] ) | 19 | 0 |
"""simple docstring"""
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if divisor % 5 == 0 or divisor % 2 == 0:
return 0
_UpperCAmelCase = 1
_UpperCAmelCase = 1
while repunit:
_UpperCAmelCase = (1_0 * repunit + 1) % divisor
repunit_index += 1
return repunit_index
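# Worked example: with R(k) = (10**k - 1) // 9 (the k-digit repunit), the first repunit
# divisible by 7 is R(6) = 111_111 = 7 * 15_873, so the function above returns 6 for
# divisor=7. Only the running remainder is kept, so repunits never need to be built.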
def _UpperCamelCase ( _A = 1_0_0_0_0_0_0 ) -> int:
"""simple docstring"""
_UpperCAmelCase = limit - 1
if divisor % 2 == 0:
divisor += 1
while least_divisible_repunit(_A ) <= limit:
divisor += 2
return divisor
if __name__ == "__main__":
print(F"{solution() = }") | 715 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : int ) ->Tuple:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(__UpperCamelCase , __UpperCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def _snake_case ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__UpperCamelCase ): device for device in jax.devices()}
def _snake_case ( self : Dict , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , __UpperCamelCase ) and column:
if all(
isinstance(__UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__UpperCamelCase , axis=0 )
return column
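        # e.g. a column of three equally shaped (2, 3) float32 jax arrays is stacked into a
        # single (3, 2, 3) array; ragged or mixed-dtype columns are returned as a plain list.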
def _snake_case ( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
import jax
import jax.numpy as jnp
        if isinstance(__UpperCamelCase , (str, bytes, type(None )) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
else:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(__UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__UpperCamelCase , """__array__""" ) and not isinstance(__UpperCamelCase , jax.Array ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
            if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : dict ) ->int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
_UpperCAmelCase = self._consolidate(__UpperCamelCase )
return column
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_batch(__UpperCamelCase )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
return batch | 19 | 0 |
"""simple docstring"""
from diffusers.utils.testing_utils import require_onnxruntime
@require_onnxruntime
class a_ :
'''simple docstring'''
pass | 716 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Tuple = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 19 | 0 |
"""simple docstring"""
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if (
(cp >= 0X4E00 and cp <= 0X9FFF)
or (cp >= 0X3400 and cp <= 0X4DBF) #
or (cp >= 0X2_0000 and cp <= 0X2_A6DF) #
or (cp >= 0X2_A700 and cp <= 0X2_B73F) #
or (cp >= 0X2_B740 and cp <= 0X2_B81F) #
or (cp >= 0X2_B820 and cp <= 0X2_CEAF) #
or (cp >= 0XF900 and cp <= 0XFAFF)
or (cp >= 0X2_F800 and cp <= 0X2_FA1F) #
): #
return True
return False
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
for char in word:
_UpperCAmelCase = ord(_A )
if not _is_chinese_char(_A ):
return 0
return 1
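# e.g. is_chinese("中国") -> 1 but is_chinese("hi") -> 0: every character must be a CJK
# ideograph for the word to count as Chinese.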
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = set()
for token in tokens:
_UpperCAmelCase = len(_A ) > 1 and is_chinese(_A )
if chinese_word:
word_set.add(_A )
_UpperCAmelCase = list(_A )
return word_list
def _UpperCamelCase ( _A , _A ) -> List[str]:
"""simple docstring"""
if not chinese_word_set:
return bert_tokens
_UpperCAmelCase = max([len(_A ) for w in chinese_word_set] )
_UpperCAmelCase = bert_tokens
_UpperCAmelCase ,_UpperCAmelCase = 0, len(_A )
while start < end:
_UpperCAmelCase = True
if is_chinese(bert_word[start] ):
_UpperCAmelCase = min(end - start , _A )
for i in range(_A , 1 , -1 ):
_UpperCAmelCase = """""".join(bert_word[start : start + i] )
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i ):
_UpperCAmelCase = """##""" + bert_word[j]
_UpperCAmelCase = start + i
_UpperCAmelCase = False
break
if single_word:
start += 1
return bert_word
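# Worked example (hypothetical tokens): with bert_tokens = ["我", "爱", "中", "国"] and
# chinese_word_set = {"中国"}, the trailing characters of the matched whole word get the
# "##" sub-symbol for whole-word masking: -> ["我", "爱", "中", "##国"].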
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = []
for i in range(0 , len(_A ) , 1_0_0 ):
_UpperCAmelCase = ltp_tokenizer.pipeline(lines[i : i + 1_0_0] , tasks=["""cws"""] ).cws
_UpperCAmelCase = [get_chinese_word(_A ) for r in res]
ltp_res.extend(_A )
assert len(_A ) == len(_A )
_UpperCAmelCase = []
for i in range(0 , len(_A ) , 1_0_0 ):
_UpperCAmelCase = bert_tokenizer(lines[i : i + 1_0_0] , add_special_tokens=_A , truncation=_A , max_length=5_1_2 )
bert_res.extend(res["""input_ids"""] )
assert len(_A ) == len(_A )
_UpperCAmelCase = []
for input_ids, chinese_word in zip(_A , _A ):
_UpperCAmelCase = []
for id in input_ids:
_UpperCAmelCase = bert_tokenizer._convert_id_to_token(_A )
input_tokens.append(_A )
_UpperCAmelCase = add_sub_symbol(_A , _A )
_UpperCAmelCase = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(_A ):
if token[:2] == "##":
_UpperCAmelCase = token[2:]
# save chinese tokens' pos
if len(_A ) == 1 and _is_chinese_char(ord(_A ) ):
ref_id.append(_A )
ref_ids.append(_A )
assert len(_A ) == len(_A )
return ref_ids
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
with open(args.file_name , """r""" , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = [line.strip() for line in data if len(_A ) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
_UpperCAmelCase = LTP(args.ltp ) # faster in GPU device
_UpperCAmelCase = BertTokenizer.from_pretrained(args.bert )
_UpperCAmelCase = prepare_ref(_A , _A , _A )
with open(args.save_path , """w""" , encoding="""utf-8""" ) as f:
_UpperCAmelCase = [json.dumps(_A ) + """\n""" for ref in ref_ids]
f.writelines(_A )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser(description='''prepare_chinese_ref''')
parser.add_argument(
'''--file_name''',
required=False,
type=str,
default='''./resources/chinese-demo.txt''',
help='''file need process, same as training data in lm''',
)
parser.add_argument(
'''--ltp''',
required=False,
type=str,
default='''./resources/ltp''',
help='''resources for LTP tokenizer, usually a path''',
)
parser.add_argument(
'''--bert''',
required=False,
type=str,
default='''./resources/robert''',
help='''resources for Bert tokenizer''',
)
parser.add_argument(
'''--save_path''',
required=False,
type=str,
default='''./resources/ref.txt''',
help='''path to save res''',
)
a : Union[str, Any] = parser.parse_args()
main(args) | 717 |
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a : int = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = test_results.split(""" """ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCAmelCase = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_A ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
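# A minimal illustration, assuming a hypothetical pytest summary string of the short
# "== OUTPUT ==" form described above:
#   handle_test_results("== 2 failed, 10 passed in 30.5s ==") -> (2, 10, "30.5s")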
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = None
_UpperCAmelCase = False
for line in failures_short_lines.split("""\n""" ):
if re.search(R"""_ \[doctest\]""" , _A ):
_UpperCAmelCase = True
_UpperCAmelCase = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_UpperCAmelCase = line
_UpperCAmelCase = False
return failures
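# Sketch of the expected "failures_short" layout (hypothetical content): a "_ [doctest] _"
# header names the failing test, digit-prefixed source lines are skipped, and the first
# non-numbered line becomes the recorded error, e.g.
#   _ [doctest] transformers.models.bert.modeling_bert.BertModel.forward _
#   100     raise ValueError(...)
#   UNEXPECTED EXCEPTION: ValueError(...)
# -> {"transformers.models.bert.modeling_bert.BertModel.forward": "UNEXPECTED EXCEPTION: ValueError(...)"}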
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = title
_UpperCAmelCase = doc_test_results["""time_spent"""].split(""",""" )[0]
_UpperCAmelCase = doc_test_results["""success"""]
_UpperCAmelCase = doc_test_results["""failures"""]
_UpperCAmelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCAmelCase = doc_test_results
@property
def _snake_case ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self._time_spent]
_UpperCAmelCase = 0
for time in time_spent:
_UpperCAmelCase = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = [0, 0, time_parts[0]]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"""{int(__UpperCamelCase )}h{int(__UpperCamelCase )}m{int(__UpperCamelCase )}s"""
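        # e.g. a stored time of "1:02:03" folds into 3_723 total seconds and is re-rendered
        # as "1h2m3s"; bare values such as "3.5" are treated as plain seconds.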
@property
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = 40
_UpperCAmelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(__UpperCamelCase , __UpperCamelCase )}
_UpperCAmelCase = """"""
for category, failures in category_failures.items():
if len(__UpperCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__UpperCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCamelCase )
@staticmethod
def _snake_case ( ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(__UpperCamelCase )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=__UpperCamelCase , )
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_UpperCAmelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
_UpperCAmelCase = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=__UpperCamelCase , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase = """"""
for key, value in failures.items():
_UpperCAmelCase = value[:2_00] + """ [Truncated]""" if len(__UpperCamelCase ) > 2_50 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
_UpperCAmelCase = job_name
_UpperCAmelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_UpperCAmelCase = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_UpperCAmelCase = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
_UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda __UpperCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_UpperCAmelCase = f"""*Num failures* :{len(job_result["failed"] )} \n"""
_UpperCAmelCase = job_result["""failures"""]
_UpperCAmelCase = self.get_reply_blocks(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text=__UpperCamelCase )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f"""Results for {job}""" , blocks=__UpperCamelCase , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = os.environ["""GITHUB_RUN_ID"""]
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A ).json()
_UpperCAmelCase = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , _A )
return {}
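# Note: the jobs endpoint is paginated at 100 entries per request (the per_page=100 query
# parameter above), hence the math.ceil over total_count to fetch the remaining pages.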
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {}
if os.path.exists(_A ):
_UpperCAmelCase = os.listdir(_A )
for file in files:
try:
with open(os.path.join(_A , _A ) , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(_A , _A )}.""" ) from e
return _artifact
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
class a_ :
def __init__( self : List[Any] , __UpperCamelCase : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = name
_UpperCAmelCase = []
def __str__( self : int ) ->Optional[Any]:
'''simple docstring'''
return self.name
def _snake_case ( self : Dict , __UpperCamelCase : str ) ->int:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
_UpperCAmelCase = {}
_UpperCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_UpperCAmelCase = directory
if artifact_name not in _available_artifacts:
_UpperCAmelCase = Artifact(_A )
_available_artifacts[artifact_name].add_path(_A )
return _available_artifacts
if __name__ == "__main__":
a : Dict = get_job_links()
a : Dict = retrieve_available_artifacts()
a : Optional[int] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a : Dict = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a : int = github_actions_job_links.get('''run_doctests''')
a : Tuple = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
a : Optional[Any] = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
a , a , a : str = handle_test_results(artifact['''stats'''])
a : Tuple = failed
a : int = success
a : Any = time_spent[1:-1] + ''', '''
a : Dict = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
a : List[Any] = line.replace('''FAILED ''', '''''')
a : Tuple = line.split()[0].replace('''\n''', '''''')
if "::" in line:
a , a : Union[str, Any] = line.split('''::''')
else:
a , a : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a : Optional[Any] = all_failures[test] if test in all_failures else '''N/A'''
a : List[str] = failure
break
a : List[Any] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply() | 19 | 0 |
from typing import Dict, List, Optional, Tuple, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_torch_available, is_torch_tensor, logging
if is_torch_available():
import torch
a : Optional[Any] = logging.get_logger(__name__)
class a_ ( _UpperCAmelCase ):
a : Tuple = ['pixel_values']
def __init__( self : Optional[int] , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Dict[str, int]] = None , __UpperCamelCase : PILImageResampling = PILImageResampling.BILINEAR , __UpperCamelCase : bool = True , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : bool = True , __UpperCamelCase : Union[int, float] = 1 / 2_55 , __UpperCamelCase : bool = True , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , **__UpperCamelCase : List[str] , ) ->None:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = size if size is not None else {"""shortest_edge""": 2_56}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 2_24, """width""": 2_24}
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = resample
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_rescale
_UpperCAmelCase = rescale_factor
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
_UpperCAmelCase = image_std if image_std is not None else IMAGENET_STANDARD_STD
def _snake_case ( self : List[str] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : PILImageResampling = PILImageResampling.BICUBIC , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
if "shortest_edge" not in size:
raise ValueError(f"""The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}""" )
_UpperCAmelCase = get_resize_output_image_size(__UpperCamelCase , size=size["""shortest_edge"""] , default_to_square=__UpperCamelCase )
return resize(__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
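        # e.g. size={"shortest_edge": 256} maps a 480x640 input to roughly 256x341: the
        # shorter side is pinned to 256 and the aspect ratio is preserved.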
def _snake_case ( self : Tuple , __UpperCamelCase : np.ndarray , __UpperCamelCase : Dict[str, int] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Tuple , ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase = get_size_dict(__UpperCamelCase )
if "height" not in size or "width" not in size:
raise ValueError(f"""The `size` parameter must contain the keys `height` and `width`. Got {size.keys()}""" )
return center_crop(__UpperCamelCase , size=(size["""height"""], size["""width"""]) , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : np.ndarray , __UpperCamelCase : float , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : Optional[Any] ) ->np.ndarray:
'''simple docstring'''
return rescale(__UpperCamelCase , scale=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : np.ndarray , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Union[float, List[float]] , __UpperCamelCase : Optional[Union[str, ChannelDimension]] = None , **__UpperCamelCase : List[str] , ) ->np.ndarray:
'''simple docstring'''
return normalize(__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase , data_format=__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : ImageInput , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : PILImageResampling = None , __UpperCamelCase : bool = None , __UpperCamelCase : Dict[str, int] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : Optional[bool] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[float, List[float]]] = None , __UpperCamelCase : Optional[Union[str, TensorType]] = None , __UpperCamelCase : Union[str, ChannelDimension] = ChannelDimension.FIRST , **__UpperCamelCase : Optional[int] , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = do_resize if do_resize is not None else self.do_resize
_UpperCAmelCase = size if size is not None else self.size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , default_to_square=__UpperCamelCase )
_UpperCAmelCase = resample if resample is not None else self.resample
_UpperCAmelCase = do_center_crop if do_center_crop is not None else self.do_center_crop
_UpperCAmelCase = crop_size if crop_size is not None else self.crop_size
_UpperCAmelCase = get_size_dict(__UpperCamelCase , param_name="""crop_size""" )
_UpperCAmelCase = do_rescale if do_rescale is not None else self.do_rescale
_UpperCAmelCase = rescale_factor if rescale_factor is not None else self.rescale_factor
_UpperCAmelCase = do_normalize if do_normalize is not None else self.do_normalize
_UpperCAmelCase = image_mean if image_mean is not None else self.image_mean
_UpperCAmelCase = image_std if image_std is not None else self.image_std
_UpperCAmelCase = make_list_of_images(__UpperCamelCase )
if not valid_images(__UpperCamelCase ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# All transformations expect numpy arrays.
_UpperCAmelCase = [to_numpy_array(__UpperCamelCase ) for image in images]
if do_resize:
_UpperCAmelCase = [self.resize(image=__UpperCamelCase , size=__UpperCamelCase , resample=__UpperCamelCase ) for image in images]
if do_center_crop:
_UpperCAmelCase = [self.center_crop(image=__UpperCamelCase , size=__UpperCamelCase ) for image in images]
if do_rescale:
_UpperCAmelCase = [self.rescale(image=__UpperCamelCase , scale=__UpperCamelCase ) for image in images]
if do_normalize:
_UpperCAmelCase = [self.normalize(image=__UpperCamelCase , mean=__UpperCamelCase , std=__UpperCamelCase ) for image in images]
_UpperCAmelCase = [to_channel_dimension_format(__UpperCamelCase , __UpperCamelCase ) for image in images]
_UpperCAmelCase = {"""pixel_values""": images}
return BatchFeature(data=__UpperCamelCase , tensor_type=__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Tuple] = None ) ->int:
'''simple docstring'''
_UpperCAmelCase = outputs.logits
# Resize logits and compute semantic segmentation maps
if target_sizes is not None:
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
raise ValueError(
"""Make sure that you pass in as many target sizes as the batch dimension of the logits""" )
if is_torch_tensor(__UpperCamelCase ):
_UpperCAmelCase = target_sizes.numpy()
_UpperCAmelCase = []
for idx in range(len(__UpperCamelCase ) ):
_UpperCAmelCase = torch.nn.functional.interpolate(
logits[idx].unsqueeze(dim=0 ) , size=target_sizes[idx] , mode="""bilinear""" , align_corners=__UpperCamelCase )
_UpperCAmelCase = resized_logits[0].argmax(dim=0 )
semantic_segmentation.append(__UpperCamelCase )
else:
_UpperCAmelCase = logits.argmax(dim=1 )
_UpperCAmelCase = [semantic_segmentation[i] for i in range(semantic_segmentation.shape[0] )]
return semantic_segmentation | 718 |
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase ( _A , _A=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def _UpperCamelCase ( *_A ) -> Dict:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("""test""" ):
for decorator in decorators:
_UpperCAmelCase = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
class a_ ( _UpperCAmelCase ):
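    # Raised by timeout_request below when a simulated offline call has no timeout set.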
pass
class a_ ( _UpperCAmelCase ):
a : Any = 0
a : Optional[Any] = 1
a : int = 2
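# The enum above lists the offline simulation modes, in declaration order:
# CONNECTION_FAILS (0), CONNECTION_TIMES_OUT (1), HF_DATASETS_OFFLINE_SET_TO_1 (2);
# each value is handled by the context manager below.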
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(_A )
yield
finally:
os.chdir(_A )
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(_A )
if not quiet:
print(_A , _A , file=_A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , _A , 0 , re.M )
return int(_A )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
    return port + uniq_delta
"""simple docstring"""
import math
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
    # All primes greater than 3 are of the form 6k +/- 1
for i in range(5 , int(math.sqrt(_A ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
def _UpperCamelCase ( _A = 1_0_0_0_1 ) -> int:
"""simple docstring"""
try:
_UpperCAmelCase = int(_A )
except (TypeError, ValueError):
raise TypeError("""Parameter nth must be int or castable to int.""" ) from None
if nth <= 0:
raise ValueError("""Parameter nth must be greater than or equal to one.""" )
_UpperCAmelCase = []
_UpperCAmelCase = 2
while len(_A ) < nth:
if is_prime(_A ):
primes.append(_A )
num += 1
else:
num += 1
return primes[len(_A ) - 1]
if __name__ == "__main__":
print(F"{solution() = }") | 719 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( _UpperCAmelCase ):
a : List[Any] = ''
a : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Tuple , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[str] = None , **__UpperCamelCase : Any , ) ->Any:
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def _snake_case ( self : List[str] ) ->List[str]:
'''simple docstring'''
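        # Build an fsspec-style listing cache from the repo's sibling files, adding implicit parent directories.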
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str = "rb" , **__UpperCamelCase : Any , ) ->List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
_UpperCAmelCase = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _snake_case ( self : int , __UpperCamelCase : int , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=False , **__UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip("""/""" ) )
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip("""/""" ) )
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 19 | 0 |
from typing import Any
class a_ :
def __init__( self : int , __UpperCamelCase : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase = data
_UpperCAmelCase = None
class a_ :
def __init__( self : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase = None
def _snake_case ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.head
while temp is not None:
print(temp.data , end=""" """ )
_UpperCAmelCase = temp.next
print()
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = Node(__UpperCamelCase )
_UpperCAmelCase = self.head
_UpperCAmelCase = new_node
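    # Swap two values by exchanging the data payloads of their nodes instead of relinking pointers.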
    def _snake_case ( self : Tuple , node_data_a : Optional[int] , node_data_b : Union[str, Any] ) ->Optional[Any]:
        '''simple docstring'''
        if node_data_a == node_data_b:
            return
        node_a = self.head
        while node_a is not None and node_a.data != node_data_a:
            node_a = node_a.next
        node_b = self.head
        while node_b is not None and node_b.data != node_data_b:
            node_b = node_b.next
        if node_a is None or node_b is None:
            return
        node_a.data, node_b.data = node_b.data, node_a.data
if __name__ == "__main__":
a : Any = LinkedList()
for i in range(5, 0, -1):
ll.push(i)
ll.print_list()
ll.swap_nodes(1, 4)
print('''After swapping''')
    ll.print_list()
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Any = '''
Calculates how good predictions are given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : List[Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=[1, 10, 1_00] , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple=3.0 ) ->Union[str, Any]:
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__UpperCamelCase )
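            # Submit one correctness check per (candidate, test case) pair; completion_id numbers the candidates within each task.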
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase ,_UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
def estimator(_A , _A , _A ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_A , _A ):
_UpperCAmelCase = itertools.repeat(_A , len(_A ) )
else:
assert len(_A ) == len(_A )
_UpperCAmelCase = iter(_A )
    return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] )
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
import importlib
import sys
from argparse import REMAINDER, ArgumentParser
from pathlib import Path
import torch_xla.distributed.xla_multiprocessing as xmp
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = ArgumentParser(
description=(
"""PyTorch TPU distributed training launch helper utility that will spawn up multiple distributed processes"""
) )
# Optional arguments for the launch helper
parser.add_argument("""--num_cores""" , type=_A , default=1 , help="""Number of TPU cores to use (1 or 8).""" )
# positional
parser.add_argument(
"""training_script""" , type=_A , help=(
"""The full path to the single TPU training """
"""program/script to be launched in parallel, """
"""followed by all the arguments for the """
"""training script"""
) , )
# rest from the training program
parser.add_argument("""training_script_args""" , nargs=_A )
return parser.parse_args()
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
# Import training_script as a module.
_UpperCAmelCase = Path(args.training_script )
sys.path.append(str(script_fpath.parent.resolve() ) )
_UpperCAmelCase = script_fpath.stem
_UpperCAmelCase = importlib.import_module(_A )
# Patch sys.argv
_UpperCAmelCase = [args.training_script] + args.training_script_args + ["""--tpu_num_cores""", str(args.num_cores )]
xmp.spawn(mod._mp_fn , args=() , nprocs=args.num_cores )
if __name__ == "__main__":
    main()
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
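    # Script the beam-search generation wrapper so its control flow can be exported to ONNX.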
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
    main()
"""simple docstring"""
import math
from typing import Optional
import numpy as np
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : List[str] = {
'''facebook/encodec_24khz''': '''https://huggingface.co/facebook/encodec_24khz/resolve/main/config.json''',
'''facebook/encodec_48khz''': '''https://huggingface.co/facebook/encodec_48khz/resolve/main/config.json''',
}
class a_ ( _UpperCAmelCase ):
a : str = 'encodec'
def __init__( self : Dict , __UpperCamelCase : Optional[Any]=[1.5, 3.0, 6.0, 12.0, 24.0] , __UpperCamelCase : str=2_40_00 , __UpperCamelCase : List[str]=1 , __UpperCamelCase : List[str]=False , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Union[str, Any]=1_28 , __UpperCamelCase : Dict=32 , __UpperCamelCase : int=1 , __UpperCamelCase : List[str]=[8, 5, 4, 2] , __UpperCamelCase : str="weight_norm" , __UpperCamelCase : Union[str, Any]=7 , __UpperCamelCase : int=7 , __UpperCamelCase : Tuple=3 , __UpperCamelCase : Dict=2 , __UpperCamelCase : int=True , __UpperCamelCase : Any="reflect" , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : Tuple=1.0 , __UpperCamelCase : int=10_24 , __UpperCamelCase : Tuple=None , __UpperCamelCase : Tuple=True , **__UpperCamelCase : int , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = target_bandwidths
_UpperCAmelCase = sampling_rate
_UpperCAmelCase = audio_channels
_UpperCAmelCase = normalize
_UpperCAmelCase = chunk_length_s
_UpperCAmelCase = overlap
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_filters
_UpperCAmelCase = num_residual_layers
_UpperCAmelCase = upsampling_ratios
_UpperCAmelCase = norm_type
_UpperCAmelCase = kernel_size
_UpperCAmelCase = last_kernel_size
_UpperCAmelCase = residual_kernel_size
_UpperCAmelCase = dilation_growth_rate
_UpperCAmelCase = use_causal_conv
_UpperCAmelCase = pad_mode
_UpperCAmelCase = compress
_UpperCAmelCase = num_lstm_layers
_UpperCAmelCase = trim_right_ratio
_UpperCAmelCase = codebook_size
_UpperCAmelCase = codebook_dim if codebook_dim is not None else hidden_size
_UpperCAmelCase = use_conv_shortcut
if self.norm_type not in ["weight_norm", "time_group_norm"]:
raise ValueError(
f"""self.norm_type must be one of `\"weight_norm\"`, `\"time_group_norm\"`), got {self.norm_type}""" )
super().__init__(**__UpperCamelCase )
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
@property
def _snake_case ( self : Union[str, Any] ) ->int:
'''simple docstring'''
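        # One encoded frame spans prod(upsampling_ratios) audio samples (the hop length),
        # so the frame rate is sampling_rate / hop_length.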
_UpperCAmelCase = np.prod(self.upsampling_ratios )
return math.ceil(self.sampling_rate / hop_length )
@property
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
        return int(10_00 * self.target_bandwidths[-1] // (self.frame_rate * 10) )
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
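        # The API returns at most 100 results per page; fetch any remaining pages.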
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = requests.get(_A , headers=_A , allow_redirects=_A )
_UpperCAmelCase = result.headers["""Location"""]
_UpperCAmelCase = requests.get(_A , allow_redirects=_A )
_UpperCAmelCase = os.path.join(_A , F"""{artifact_name}.zip""" )
with open(_A , """wb""" ) as fp:
fp.write(response.content )
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
_UpperCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(""": """ )]
_UpperCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase = line[len("""FAILED """ ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
_UpperCAmelCase = line
if len(_A ) != len(_A ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` """
F"""and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
_UpperCAmelCase = None
if job_name and job_links:
_UpperCAmelCase = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_UpperCAmelCase = test.split("""/""" )[2]
else:
_UpperCAmelCase = None
return test
def _UpperCamelCase ( _A , _A=None ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase = [x for x in logs if x[2] is not None]
_UpperCAmelCase = {x[2] for x in logs}
_UpperCAmelCase = {}
for test in tests:
_UpperCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase = {"""count""": n_errors, """errors""": error_counts}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """| no. | error | status |"""
_UpperCAmelCase = """|-:|:-|:-|"""
_UpperCAmelCase = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase = reduced_by_error[error]["""count"""]
_UpperCAmelCase = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(_A )
return "\n".join(_A )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """| model | no. of errors | major error | count |"""
_UpperCAmelCase = """|-:|-:|-:|-:|"""
_UpperCAmelCase = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase = reduced_by_model[model]["""count"""]
_UpperCAmelCase ,_UpperCAmelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
_UpperCAmelCase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(_A )
return "\n".join(_A )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
a : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a : Tuple = get_job_links(args.workflow_run_id, token=args.token)
a : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a : List[Any] = k.find(''' / ''')
a : Tuple = k[index + len(''' / ''') :]
a : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a : Optional[int] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a : int = reduce_by_error(errors)
a : str = reduce_by_model(errors)
a : int = make_github_table(reduced_by_error)
a : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
        fp.write(sa)
"""simple docstring"""
from ...utils import is_torch_available, is_transformers_available
if is_transformers_available() and is_torch_available():
    from .pipeline_vq_diffusion import LearnedClassifierFreeSamplingEmbeddings, VQDiffusionPipeline
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def _snake_case ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=None ) ->List[str]:
'''simple docstring'''
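        # Parse a flat token sequence of <s_key> ... </s_key> markers back into a nested dict/list structure.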
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(r"""<s_(.*?)>""" , __UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(rf"""</s_{key}>""" , __UpperCamelCase , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
_UpperCAmelCase = tokens.replace(__UpperCamelCase , """""" )
else:
_UpperCAmelCase = end_token.group()
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __UpperCamelCase , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_UpperCAmelCase = self.tokenajson(__UpperCamelCase , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if value:
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(r"""<sep/>""" ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCamelCase )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
_UpperCAmelCase = tokens[tokens.find(__UpperCamelCase ) + len(__UpperCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if len(__UpperCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
        return self.image_processor
"""simple docstring"""
import math
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
_UpperCAmelCase = math.loga(math.sqrt(4 * positive_integer + 1 ) / 2 + 1 / 2 )
return exponent == int(_A )
def _UpperCamelCase ( _A = 1 / 1_2_3_4_5 ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 3
while True:
_UpperCAmelCase = (integer**2 - 1) / 4
# if candidate is an integer, then there is a partition for k
if partition_candidate == int(_A ):
_UpperCAmelCase = int(_A )
total_partitions += 1
if check_partition_perfect(_A ):
perfect_partitions += 1
if perfect_partitions > 0:
if perfect_partitions / total_partitions < max_proportion:
return int(_A )
integer += 1
if __name__ == "__main__":
print(F"{solution() = }") | 703 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( _A , _A , _A ) -> float:
"""simple docstring"""
_UpperCAmelCase = x
_UpperCAmelCase = y
for step in range(_A ): # noqa: B007
_UpperCAmelCase = a * a - b * b + x
_UpperCAmelCase = 2 * a * b + y
_UpperCAmelCase = a_new
        # divergence happens for all complex numbers with an absolute value
        # greater than 2, i.e. once a * a + b * b exceeds 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(_A , 1 , 1 ) )
def _UpperCamelCase ( _A = 8_0_0 , _A = 6_0_0 , _A = -0.6 , _A = 0 , _A = 3.2 , _A = 5_0 , _A = True , ) -> Image.Image:
"""simple docstring"""
_UpperCAmelCase = Image.new("""RGB""" , (image_width, image_height) )
_UpperCAmelCase = img.load()
# loop through the image-coordinates
for image_x in range(_A ):
for image_y in range(_A ):
# determine the figure-coordinates based on the image-coordinates
_UpperCAmelCase = figure_width / image_width * image_height
_UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
_UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
_UpperCAmelCase = get_distance(_A , _A , _A )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_UpperCAmelCase = get_color_coded_rgb(_A )
else:
_UpperCAmelCase = get_black_and_white_rgb(_A )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
    img.show()
"""simple docstring"""
import hashlib
import unittest
from typing import Dict
import numpy as np
from transformers import (
MODEL_FOR_MASK_GENERATION_MAPPING,
TF_MODEL_FOR_MASK_GENERATION_MAPPING,
is_vision_available,
pipeline,
)
from transformers.pipelines import MaskGenerationPipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
if is_vision_available():
from PIL import Image
else:
class a_ :
@staticmethod
def _snake_case ( *__UpperCamelCase : Any , **__UpperCamelCase : Any ) ->str:
'''simple docstring'''
pass
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = hashlib.mda(image.tobytes() )
return m.hexdigest()[:1_0]
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = np.array(_A )
_UpperCAmelCase = npimg.shape
return {"hash": hashimage(_A ), "shape": shape}
@is_pipeline_test
@require_vision
@require_torch
class a_ ( unittest.TestCase ):
a : List[Any] = dict(
(list(MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if MODEL_FOR_MASK_GENERATION_MAPPING else []) )
a : List[Any] = dict(
(list(TF_MODEL_FOR_MASK_GENERATION_MAPPING.items() ) if TF_MODEL_FOR_MASK_GENERATION_MAPPING else []) )
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = MaskGenerationPipeline(model=__UpperCamelCase , image_processor=__UpperCamelCase )
return image_segmenter, [
"./tests/fixtures/tests_samples/COCO/000000039769.png",
"./tests/fixtures/tests_samples/COCO/000000039769.png",
]
def _snake_case ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] ) ->Dict:
'''simple docstring'''
pass
@require_tf
@unittest.skip("""Image segmentation not implemented in TF""" )
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
pass
@slow
@require_torch
def _snake_case ( self : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = pipeline("""mask-generation""" , model="""facebook/sam-vit-huge""" )
_UpperCAmelCase = image_segmenter("""http://images.cocodataset.org/val2017/000000039769.jpg""" , points_per_batch=2_56 )
# Shortening by hashing
_UpperCAmelCase = []
for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(__UpperCamelCase ), "scores": outputs["scores"][i]}]
# fmt: off
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0_2_1},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0_0_5_3},
{"""mask""": {"""hash""": """e2d0b7a0b7""", """shape""": (4_80, 6_40)}, """scores""": 0.9_9_6_7},
{"""mask""": {"""hash""": """453c7844bd""", """shape""": (4_80, 6_40)}, """scores""": 0.9_9_3},
{"""mask""": {"""hash""": """3d44f2926d""", """shape""": (4_80, 6_40)}, """scores""": 0.9_9_0_9},
{"""mask""": {"""hash""": """64033ddc3f""", """shape""": (4_80, 6_40)}, """scores""": 0.9_8_7_9},
{"""mask""": {"""hash""": """801064ff79""", """shape""": (4_80, 6_40)}, """scores""": 0.9_8_3_4},
{"""mask""": {"""hash""": """6172f276ef""", """shape""": (4_80, 6_40)}, """scores""": 0.9_7_1_6},
{"""mask""": {"""hash""": """b49e60e084""", """shape""": (4_80, 6_40)}, """scores""": 0.9_6_1_2},
{"""mask""": {"""hash""": """a811e775fd""", """shape""": (4_80, 6_40)}, """scores""": 0.9_5_9_9},
{"""mask""": {"""hash""": """a6a8ebcf4b""", """shape""": (4_80, 6_40)}, """scores""": 0.9_5_5_2},
{"""mask""": {"""hash""": """9d8257e080""", """shape""": (4_80, 6_40)}, """scores""": 0.9_5_3_2},
{"""mask""": {"""hash""": """32de6454a8""", """shape""": (4_80, 6_40)}, """scores""": 0.9_5_1_6},
{"""mask""": {"""hash""": """af3d4af2c8""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_9_9},
{"""mask""": {"""hash""": """3c6db475fb""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_8_3},
{"""mask""": {"""hash""": """c290813fb9""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_6_4},
{"""mask""": {"""hash""": """b6f0b8f606""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """92ce16bfdf""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_3},
{"""mask""": {"""hash""": """c749b25868""", """shape""": (4_80, 6_40)}, """scores""": 0.9_4_0_8},
{"""mask""": {"""hash""": """efb6cab859""", """shape""": (4_80, 6_40)}, """scores""": 0.9_3_3_5},
{"""mask""": {"""hash""": """1ff2eafb30""", """shape""": (4_80, 6_40)}, """scores""": 0.9_3_2_6},
{"""mask""": {"""hash""": """788b798e24""", """shape""": (4_80, 6_40)}, """scores""": 0.9_2_6_2},
{"""mask""": {"""hash""": """abea804f0e""", """shape""": (4_80, 6_40)}, """scores""": 0.8_9_9_9},
{"""mask""": {"""hash""": """7b9e8ddb73""", """shape""": (4_80, 6_40)}, """scores""": 0.8_9_8_6},
{"""mask""": {"""hash""": """cd24047c8a""", """shape""": (4_80, 6_40)}, """scores""": 0.8_9_8_4},
{"""mask""": {"""hash""": """6943e6bcbd""", """shape""": (4_80, 6_40)}, """scores""": 0.8_8_7_3},
{"""mask""": {"""hash""": """b5f47c9191""", """shape""": (4_80, 6_40)}, """scores""": 0.8_8_7_1}
] , )
# fmt: on
@require_torch
@slow
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = """facebook/sam-vit-huge"""
_UpperCAmelCase = pipeline("""mask-generation""" , model=__UpperCamelCase )
_UpperCAmelCase = image_segmenter(
"""http://images.cocodataset.org/val2017/000000039769.jpg""" , pred_iou_thresh=1 , points_per_batch=2_56 )
# Shortening by hashing
_UpperCAmelCase = []
for i, o in enumerate(outputs["""masks"""] ):
            new_output += [{"mask": mask_to_test_readable(o ), "scores": outputs["scores"][i]}]
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [
{"""mask""": {"""hash""": """115ad19f5f""", """shape""": (4_80, 6_40)}, """scores""": 1.0_4_4_4},
{"""mask""": {"""hash""": """6affa964c6""", """shape""": (4_80, 6_40)}, """scores""": 1.0_2_1_0},
{"""mask""": {"""hash""": """dfe28a0388""", """shape""": (4_80, 6_40)}, """scores""": 1.0_1_6_7},
{"""mask""": {"""hash""": """c0a5f4a318""", """shape""": (4_80, 6_40)}, """scores""": 1.0_1_3_2},
{"""mask""": {"""hash""": """fe8065c197""", """shape""": (4_80, 6_40)}, """scores""": 1.0_0_5_3},
            ] , )
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
def __init__( self : List[str] , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 88 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 32 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "geglu" , __UpperCamelCase : Optional[int] = None , ) ->Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__UpperCamelCase , attention_head_dim=__UpperCamelCase , in_channels=__UpperCamelCase , num_layers=__UpperCamelCase , dropout=__UpperCamelCase , norm_num_groups=__UpperCamelCase , cross_attention_dim=__UpperCamelCase , attention_bias=__UpperCamelCase , sample_size=__UpperCamelCase , num_vector_embeds=__UpperCamelCase , activation_fn=__UpperCamelCase , num_embeds_ada_norm=__UpperCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def _snake_case ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : bool = True , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , timestep=__UpperCamelCase , cross_attention_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
        return TransformeraDModelOutput(sample=__UpperCamelCase )
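# Minimal numeric sketch (illustrative, self-contained): the blending rule from the
# forward pass above. Each transformer contributes a residual
# (encoded_state - input_states); the residuals are mixed with mix_ratio and the
# original input is added back at the end.
import torch

_inp = torch.zeros(1, 4)
_res_a = torch.full((1, 4), 1.0)
_res_b = torch.full((1, 4), 3.0)
_mix_ratio = 0.5
_out = _res_a * _mix_ratio + _res_b * (1 - _mix_ratio) + _inp  # every entry is 2.0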
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
if len(_A ) < 2:
raise ValueError("""Monogons and Digons are not polygons in the Euclidean space""" )
if any(i <= 0 for i in nums ):
raise ValueError("""All values must be greater than 0""" )
_UpperCAmelCase = nums.copy()
copy_nums.sort()
return copy_nums[-1] < sum(copy_nums[:-1] )
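# Hedged usage sketch: de-obfuscated, this helper is usually named check_polygon
# (the name is an assumption here). It returns True iff the longest side is
# strictly shorter than the sum of all the others:
#
#   check_polygon([6, 10, 5])     # True,  since 10 < 6 + 5
#   check_polygon([3, 7, 13, 2])  # False, since 13 >= 3 + 7 + 2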
if __name__ == "__main__":
import doctest
    doctest.testmod()
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = LxmertConfig.from_json_file(_A )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase = LxmertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_A , _A , _A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _A )
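# Hedged usage sketch (paths are placeholders): the converter above can also be
# called directly instead of through the CLI declared below.
#
#   convert_tf_checkpoint_to_pytorch(
#       "lxmert/model.ckpt",         # --tf_checkpoint_path
#       "lxmert/config.json",        # --config_file
#       "lxmert/pytorch_model.bin",  # --pytorch_dump_path
#   )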
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from typing import Optional, Tuple, Union
import flax
import flax.linen as nn
import jax
import jax.numpy as jnp
from flax.core.frozen_dict import FrozenDict
from ..configuration_utils import ConfigMixin, flax_register_to_config
from ..utils import BaseOutput
from .embeddings_flax import FlaxTimestepEmbedding, FlaxTimesteps
from .modeling_flax_utils import FlaxModelMixin
from .unet_ad_blocks_flax import (
FlaxCrossAttnDownBlockaD,
FlaxCrossAttnUpBlockaD,
FlaxDownBlockaD,
FlaxUNetMidBlockaDCrossAttn,
FlaxUpBlockaD,
)
@flax.struct.dataclass
class a_ ( _UpperCAmelCase ):
a : jnp.ndarray
@flax_register_to_config
class a_ ( nn.Module , _UpperCAmelCase , _UpperCAmelCase ):
a : int = 32
a : int = 4
a : int = 4
a : Tuple[str] = (
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"CrossAttnDownBlock2D",
"DownBlock2D",
)
a : Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")
a : Union[bool, Tuple[bool]] = False
a : Tuple[int] = (320, 640, 1280, 1280)
a : int = 2
a : Union[int, Tuple[int]] = 8
a : Optional[Union[int, Tuple[int]]] = None
a : int = 1280
a : float = 0.0
a : bool = False
a : jnp.dtype = jnp.floataa
a : bool = True
a : int = 0
a : bool = False
def _snake_case ( self : Tuple , __UpperCamelCase : jax.random.KeyArray ) ->FrozenDict:
'''simple docstring'''
_UpperCAmelCase = (1, self.in_channels, self.sample_size, self.sample_size)
_UpperCAmelCase = jnp.zeros(__UpperCamelCase , dtype=jnp.floataa )
_UpperCAmelCase = jnp.ones((1,) , dtype=jnp.intaa )
_UpperCAmelCase = jnp.zeros((1, 1, self.cross_attention_dim) , dtype=jnp.floataa )
_UpperCAmelCase ,_UpperCAmelCase = jax.random.split(__UpperCamelCase )
_UpperCAmelCase = {"""params""": params_rng, """dropout""": dropout_rng}
return self.init(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )["params"]
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.block_out_channels
_UpperCAmelCase = block_out_channels[0] * 4
if self.num_attention_heads is not None:
raise ValueError(
"""At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19.""" )
# If `num_attention_heads` is not defined (which is the case for most models)
# it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
# The reason for this behavior is to correct for incorrectly named variables that were introduced
# when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
# Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
# which is why we correct for the naming here.
_UpperCAmelCase = self.num_attention_heads or self.attention_head_dim
# input
_UpperCAmelCase = nn.Conv(
block_out_channels[0] , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
# time
_UpperCAmelCase = FlaxTimesteps(
block_out_channels[0] , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.config.freq_shift )
_UpperCAmelCase = FlaxTimestepEmbedding(__UpperCamelCase , dtype=self.dtype )
_UpperCAmelCase = self.only_cross_attention
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = (only_cross_attention,) * len(self.down_block_types )
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = (num_attention_heads,) * len(self.down_block_types )
# down
_UpperCAmelCase = []
_UpperCAmelCase = block_out_channels[0]
for i, down_block_type in enumerate(self.down_block_types ):
_UpperCAmelCase = output_channel
_UpperCAmelCase = block_out_channels[i]
_UpperCAmelCase = i == len(__UpperCamelCase ) - 1
if down_block_type == "CrossAttnDownBlock2D":
_UpperCAmelCase = FlaxCrossAttnDownBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , num_attention_heads=num_attention_heads[i] , add_downsample=not is_final_block , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCAmelCase = FlaxDownBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , dropout=self.dropout , num_layers=self.layers_per_block , add_downsample=not is_final_block , dtype=self.dtype , )
down_blocks.append(__UpperCamelCase )
_UpperCAmelCase = down_blocks
# mid
_UpperCAmelCase = FlaxUNetMidBlockaDCrossAttn(
in_channels=block_out_channels[-1] , dropout=self.dropout , num_attention_heads=num_attention_heads[-1] , use_linear_projection=self.use_linear_projection , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
# up
_UpperCAmelCase = []
_UpperCAmelCase = list(reversed(__UpperCamelCase ) )
_UpperCAmelCase = list(reversed(__UpperCamelCase ) )
_UpperCAmelCase = list(reversed(__UpperCamelCase ) )
_UpperCAmelCase = reversed_block_out_channels[0]
for i, up_block_type in enumerate(self.up_block_types ):
_UpperCAmelCase = output_channel
_UpperCAmelCase = reversed_block_out_channels[i]
_UpperCAmelCase = reversed_block_out_channels[min(i + 1 , len(__UpperCamelCase ) - 1 )]
_UpperCAmelCase = i == len(__UpperCamelCase ) - 1
if up_block_type == "CrossAttnUpBlock2D":
_UpperCAmelCase = FlaxCrossAttnUpBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , prev_output_channel=__UpperCamelCase , num_layers=self.layers_per_block + 1 , num_attention_heads=reversed_num_attention_heads[i] , add_upsample=not is_final_block , dropout=self.dropout , use_linear_projection=self.use_linear_projection , only_cross_attention=only_cross_attention[i] , use_memory_efficient_attention=self.use_memory_efficient_attention , dtype=self.dtype , )
else:
_UpperCAmelCase = FlaxUpBlockaD(
in_channels=__UpperCamelCase , out_channels=__UpperCamelCase , prev_output_channel=__UpperCamelCase , num_layers=self.layers_per_block + 1 , add_upsample=not is_final_block , dropout=self.dropout , dtype=self.dtype , )
up_blocks.append(__UpperCamelCase )
_UpperCAmelCase = output_channel
_UpperCAmelCase = up_blocks
# out
_UpperCAmelCase = nn.GroupNorm(num_groups=32 , epsilon=1e-5 )
_UpperCAmelCase = nn.Conv(
self.out_channels , kernel_size=(3, 3) , strides=(1, 1) , padding=((1, 1), (1, 1)) , dtype=self.dtype , )
def __call__( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int=None , __UpperCamelCase : Any=None , __UpperCamelCase : bool = True , __UpperCamelCase : bool = False , ) ->Union[FlaxUNetaDConditionOutput, Tuple]:
'''simple docstring'''
if not isinstance(__UpperCamelCase , jnp.ndarray ):
_UpperCAmelCase = jnp.array([timesteps] , dtype=jnp.intaa )
elif isinstance(__UpperCamelCase , jnp.ndarray ) and len(timesteps.shape ) == 0:
_UpperCAmelCase = timesteps.astype(dtype=jnp.floataa )
_UpperCAmelCase = jnp.expand_dims(__UpperCamelCase , 0 )
_UpperCAmelCase = self.time_proj(__UpperCamelCase )
_UpperCAmelCase = self.time_embedding(__UpperCamelCase )
# 2. pre-process
_UpperCAmelCase = jnp.transpose(__UpperCamelCase , (0, 2, 3, 1) )
_UpperCAmelCase = self.conv_in(__UpperCamelCase )
# 3. down
_UpperCAmelCase = (sample,)
for down_block in self.down_blocks:
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase ,_UpperCAmelCase = down_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train )
else:
_UpperCAmelCase ,_UpperCAmelCase = down_block(__UpperCamelCase , __UpperCamelCase , deterministic=not train )
down_block_res_samples += res_samples
if down_block_additional_residuals is not None:
_UpperCAmelCase = ()
for down_block_res_sample, down_block_additional_residual in zip(
__UpperCamelCase , __UpperCamelCase ):
down_block_res_sample += down_block_additional_residual
new_down_block_res_samples += (down_block_res_sample,)
_UpperCAmelCase = new_down_block_res_samples
# 4. mid
_UpperCAmelCase = self.mid_block(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , deterministic=not train )
if mid_block_additional_residual is not None:
sample += mid_block_additional_residual
# 5. up
for up_block in self.up_blocks:
_UpperCAmelCase = down_block_res_samples[-(self.layers_per_block + 1) :]
_UpperCAmelCase = down_block_res_samples[: -(self.layers_per_block + 1)]
if isinstance(__UpperCamelCase , __UpperCamelCase ):
_UpperCAmelCase = up_block(
__UpperCamelCase , temb=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , res_hidden_states_tuple=__UpperCamelCase , deterministic=not train , )
else:
_UpperCAmelCase = up_block(__UpperCamelCase , temb=__UpperCamelCase , res_hidden_states_tuple=__UpperCamelCase , deterministic=not train )
# 6. post-process
_UpperCAmelCase = self.conv_norm_out(__UpperCamelCase )
_UpperCAmelCase = nn.silu(__UpperCamelCase )
_UpperCAmelCase = self.conv_out(__UpperCamelCase )
_UpperCAmelCase = jnp.transpose(__UpperCamelCase , (0, 3, 1, 2) )
if not return_dict:
return (sample,)
        return FlaxUNetaDConditionOutput(sample=__UpperCamelCase )
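# Minimal shape sketch (illustrative): the input layout expected by __call__ above,
# before it transposes the sample from NCHW to NHWC, using the class defaults
# (in_channels=4, sample_size=32, cross_attention_dim=1280).
import jax.numpy as jnp  # already imported at the top of this file

_sample = jnp.zeros((1, 4, 32, 32))                # (batch, channels, height, width)
_timesteps = jnp.array([999], dtype=jnp.int32)     # one timestep per batch element
_encoder_hidden_states = jnp.zeros((1, 77, 1280))  # (batch, tokens, cross_attention_dim)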
"""simple docstring"""
import argparse
import os
import re
import packaging.version
a : str = '''examples/'''
a : List[str] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a : Tuple = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
a : List[str] = '''README.md'''
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase ,_UpperCAmelCase = REPLACE_PATTERNS[pattern]
_UpperCAmelCase = replace.replace("""VERSION""" , _A )
_UpperCAmelCase = re_pattern.sub(_A , _A )
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_A )
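# Self-contained demo (illustrative) of the substitution performed above, using the
# "init" entry of the pattern table:
#
#   import re
#   re_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
#   replace = '__version__ = "VERSION"\n'.replace("VERSION", "0.17.0")
#   re_pattern.sub(replace, '__version__ = "0.17.0.dev0"\n')
#   # -> '__version__ = "0.17.0"\n'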
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern="""examples""" )
def _UpperCamelCase ( _A , _A=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
_UpperCAmelCase = """🤗 Transformers currently provides the following architectures"""
_UpperCAmelCase = """1. Want to contribute a new model?"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.readlines()
# Find the start of the list.
_UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_UpperCAmelCase = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_A )
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase = REPLACE_PATTERNS["""init"""][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def _UpperCamelCase ( _A=False ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_UpperCAmelCase = default_version.base_version
elif patch:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_UpperCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = default_version
print(F"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
def _UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
_UpperCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
_UpperCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(_A )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
        post_release_work()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Any = {
'''configuration_blenderbot''': [
'''BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''BlenderbotConfig''',
'''BlenderbotOnnxConfig''',
],
'''tokenization_blenderbot''': ['''BlenderbotTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['''BlenderbotTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = [
'''BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BlenderbotForCausalLM''',
'''BlenderbotForConditionalGeneration''',
'''BlenderbotModel''',
'''BlenderbotPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
'''TFBlenderbotForConditionalGeneration''',
'''TFBlenderbotModel''',
'''TFBlenderbotPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
'''FlaxBlenderbotForConditionalGeneration''',
'''FlaxBlenderbotModel''',
'''FlaxBlenderbotPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_blenderbot import (
BLENDERBOT_PRETRAINED_CONFIG_ARCHIVE_MAP,
BlenderbotConfig,
BlenderbotOnnxConfig,
)
from .tokenization_blenderbot import BlenderbotTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_blenderbot_fast import BlenderbotTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_blenderbot import (
BLENDERBOT_PRETRAINED_MODEL_ARCHIVE_LIST,
BlenderbotForCausalLM,
BlenderbotForConditionalGeneration,
BlenderbotModel,
BlenderbotPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_blenderbot import (
TFBlenderbotForConditionalGeneration,
TFBlenderbotModel,
TFBlenderbotPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_blenderbot import (
FlaxBlenderbotForConditionalGeneration,
FlaxBlenderbotModel,
FlaxBlenderbotPreTrainedModel,
)
else:
import sys
    a : int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
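# Hedged note (illustrative): with the _LazyModule pattern above, a submodule is only
# imported on first attribute access, e.g.
#
#   from transformers.models import blenderbot
#   blenderbot.BlenderbotConfig  # this access triggers the import of configuration_blenderbot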
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A ) -> None:
"""simple docstring"""
create_state_space_tree(_A , [] , 0 , [0 for i in range(len(_A ) )] )
def _UpperCamelCase ( _A , _A , _A , _A , ) -> None:
"""simple docstring"""
if index == len(_A ):
print(_A )
return
for i in range(len(_A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_UpperCAmelCase = True
create_state_space_tree(_A , _A , index + 1 , _A )
current_sequence.pop()
_UpperCAmelCase = False
a : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
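# Expected-output sketch: for sequence_a = ["A", "B", "C"] the backtracking above
# prints the six permutations in this order:
#
#   ['A', 'B', 'C']
#   ['A', 'C', 'B']
#   ['B', 'A', 'C']
#   ['B', 'C', 'A']
#   ['C', 'A', 'B']
#   ['C', 'B', 'A']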
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_segformer import SegformerImageProcessor
a : Optional[int] = logging.get_logger(__name__)
class a_ ( _UpperCAmelCase ):
def __init__( self : str , *__UpperCamelCase : Any , **__UpperCamelCase : Tuple ) ->None:
'''simple docstring'''
warnings.warn(
"""The class SegformerFeatureExtractor is deprecated and will be removed in version 5 of Transformers."""
""" Please use SegformerImageProcessor instead.""" , __UpperCamelCase , )
        super().__init__(*__UpperCamelCase , **__UpperCamelCase )
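# Hedged migration sketch (the checkpoint name is just an example): the drop-in
# replacement recommended by the deprecation warning above.
#
#   from transformers import SegformerImageProcessor
#   image_processor = SegformerImageProcessor.from_pretrained(
#       "nvidia/segformer-b0-finetuned-ade-512-512"
#   )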
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Any=4 , __UpperCamelCase : Optional[int]=[0, 1, 2, 3] , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : int=[1, 3_84, 24, 24] , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=None , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = backbone_out_indices
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = scope
_UpperCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : int = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : str = False
a : List[str] = False
a : Dict = False
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = DPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = False
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
_UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = """add"""
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
_UpperCAmelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCamelCase , atol=1e-4 ) )
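# Quick arithmetic sketch: with the tester defaults above (image_size=32,
# patch_size=16), the ViT-style sequence length used by the model tester is
#
#   num_patches = (32 // 16) ** 2  # = 4
#   seq_length = num_patches + 1   # = 5; the extra position is the [CLS] token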
"""simple docstring"""
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = [3_1, 2_8, 3_1, 3_0, 3_1, 3_0, 3_1, 3_1, 3_0, 3_1, 3_0, 3_1]
_UpperCAmelCase = 6
_UpperCAmelCase = 1
_UpperCAmelCase = 1_9_0_1
_UpperCAmelCase = 0
while year < 2_0_0_1:
day += 7
if (year % 4 == 0 and year % 1_0_0 != 0) or (year % 4_0_0 == 0):
if day > days_per_month[month - 1] and month != 2:
month += 1
_UpperCAmelCase = day - days_per_month[month - 2]
elif day > 2_9 and month == 2:
month += 1
_UpperCAmelCase = day - 2_9
else:
if day > days_per_month[month - 1]:
month += 1
_UpperCAmelCase = day - days_per_month[month - 2]
if month > 1_2:
year += 1
_UpperCAmelCase = 1
if year < 2_0_0_1 and day == 1:
sundays += 1
return sundays
if __name__ == "__main__":
    print(solution())
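# Self-contained cross-check (illustrative, not part of the original solution): the
# same count via the standard library. Project Euler 19's published answer is 171.
from datetime import date

assert sum(1 for y in range(1901, 2001) for m in range(1, 13) if date(y, m, 1).weekday() == 6) == 171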
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : List[str] = logging.get_logger(__name__)
class a_ ( enum.Enum ):
a : Optional[Any] = 0
a : Dict = 1
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'generated'
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) ->Any:
'''simple docstring'''
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Any , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
if truncation is not None:
_UpperCAmelCase = truncation
_UpperCAmelCase = generate_kwargs
_UpperCAmelCase = {}
if return_tensors is not None and return_type is None:
_UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCAmelCase = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
_UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
return True
def _snake_case ( self : Optional[Any] , *__UpperCamelCase : Any , __UpperCamelCase : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
_UpperCAmelCase = ([prefix + arg for arg in args[0]],)
_UpperCAmelCase = True
elif isinstance(args[0] , __UpperCamelCase ):
_UpperCAmelCase = (prefix + args[0],)
_UpperCAmelCase = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_UpperCAmelCase = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : str=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCamelCase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def _snake_case ( self : str , __UpperCamelCase : Dict , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
if self.framework == "pt":
_UpperCAmelCase ,_UpperCAmelCase = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
_UpperCAmelCase ,_UpperCAmelCase = tf.shape(model_inputs["""input_ids"""] ).numpy()
_UpperCAmelCase = generate_kwargs.get("""min_length""" , self.model.config.min_length )
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
_UpperCAmelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCAmelCase = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_UpperCAmelCase = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=ReturnType.TEXT , __UpperCamelCase : int=False ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCAmelCase = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_UpperCAmelCase = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'summary'
def __call__( self : Optional[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
if max_length < min_length:
            logger.warning(f"""Your min_length={min_length} must be smaller than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : Optional[int] = 'translation'
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def _snake_case ( self : Tuple , *__UpperCamelCase : List[str] , __UpperCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=None ) ->Tuple:
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : int=None , __UpperCamelCase : int=None , **__UpperCamelCase : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
_UpperCAmelCase = src_lang
if tgt_lang is not None:
_UpperCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCAmelCase = kwargs.get("""task""" , self.task )
_UpperCAmelCase = task.split("""_""" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
_UpperCAmelCase = items[1]
_UpperCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->int:
'''simple docstring'''
        return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
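# Hedged usage sketch: the "translation_XX_to_YY" task string parsed above selects
# the source and target languages, so e.g.
#
#   from transformers import pipeline
#   translator = pipeline("translation_en_to_fr", model="t5-small")
#   translator("How old are you?")  # -> [{"translation_text": "..."}]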
"""simple docstring"""
from transformers import HfArgumentParser, TensorFlowBenchmark, TensorFlowBenchmarkArguments
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser(_A )
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
_UpperCAmelCase = TensorFlowBenchmark(args=_A )
try:
_UpperCAmelCase = parser.parse_args_into_dataclasses()[0]
except ValueError as e:
_UpperCAmelCase = """Arg --no_{0} is no longer used, please use --no-{0} instead."""
_UpperCAmelCase = """ """.join(str(_A ).split(""" """ )[:-1] )
_UpperCAmelCase = """"""
_UpperCAmelCase = eval(str(_A ).split(""" """ )[-1] )
_UpperCAmelCase = []
for arg in depreciated_args:
# arg[2:] removes '--'
if arg[2:] in TensorFlowBenchmark.deprecated_args:
# arg[5:] removes '--no_'
full_error_msg += arg_error_msg.format(arg[5:] )
else:
wrong_args.append(_A )
if len(_A ) > 0:
_UpperCAmelCase = full_error_msg + begin_error_msg + str(_A )
raise ValueError(_A )
benchmark.run()
if __name__ == "__main__":
    main()
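# Minimal sketch (illustrative) of the deprecation message assembled above for a
# renamed flag such as --no_multi_process:
#
#   "Arg --no_{0} is no longer used, please use --no-{0} instead.".format("multi_process")
#   # -> "Arg --no_multi_process is no longer used, please use --no-multi_process instead."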
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
_UpperCAmelCase = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__UpperCamelCase )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
"""simple docstring"""
import baseaa
import io
import json
import os
from copy import deepcopy
from ..optimizer import AcceleratedOptimizer
from ..scheduler import AcceleratedScheduler
class a_ :
def __init__( self : Any , __UpperCamelCase : Optional[int] ) ->Tuple:
'''simple docstring'''
if isinstance(__UpperCamelCase , __UpperCamelCase ):
# Don't modify user's data should they want to reuse it (e.g. in tests), because once we
# modified it, it will not be accepted here again, since `auto` values would have been overridden
_UpperCAmelCase = deepcopy(__UpperCamelCase )
elif os.path.exists(__UpperCamelCase ):
with io.open(__UpperCamelCase , """r""" , encoding="""utf-8""" ) as f:
_UpperCAmelCase = json.load(__UpperCamelCase )
else:
try:
_UpperCAmelCase = baseaa.urlsafe_baadecode(__UpperCamelCase ).decode("""utf-8""" )
_UpperCAmelCase = json.loads(__UpperCamelCase )
except (UnicodeDecodeError, AttributeError, ValueError):
raise ValueError(
f"""Expected a string path to an existing deepspeed config, or a dictionary, or a base64 encoded string. Received: {config_file_or_dict}""" )
_UpperCAmelCase = config
self.set_stage_and_offload()
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.get_value("""zero_optimization.stage""" , -1 )
# offload
_UpperCAmelCase = False
if self.is_zeroa() or self.is_zeroa():
_UpperCAmelCase = set(["""cpu""", """nvme"""] )
_UpperCAmelCase = set(
[
self.get_value("""zero_optimization.offload_optimizer.device""" ),
self.get_value("""zero_optimization.offload_param.device""" ),
] )
if len(offload_devices & offload_devices_valid ) > 0:
_UpperCAmelCase = True
def _snake_case ( self : Optional[int] , __UpperCamelCase : Optional[int] ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.config
# find the config node of interest if it exists
_UpperCAmelCase = ds_key_long.split(""".""" )
_UpperCAmelCase = nodes.pop()
for node in nodes:
_UpperCAmelCase = config.get(__UpperCamelCase )
if config is None:
return None, ds_key
return config, ds_key
def _snake_case ( self : Tuple , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict=None ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.find_config_node(__UpperCamelCase )
if config is None:
return default
return config.get(__UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : List[Any]=False ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.config
# find the config node of interest if it exists
_UpperCAmelCase = ds_key_long.split(""".""" )
for node in nodes:
_UpperCAmelCase = config
_UpperCAmelCase = config.get(__UpperCamelCase )
if config is None:
if must_exist:
raise ValueError(f"""Can't find {ds_key_long} entry in the config: {self.config}""" )
else:
return
# if found remove it
if parent_config is not None:
parent_config.pop(__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : str ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.get_value(__UpperCamelCase )
return False if value is None else bool(__UpperCamelCase )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.get_value(__UpperCamelCase )
return False if value is None else not bool(__UpperCamelCase )
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
return self._stage == 2
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
return self._stage == 3
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
return self._offload
class a_ :
def __init__( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = engine
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[Any] ) ->str:
'''simple docstring'''
self.engine.backward(__UpperCamelCase , **__UpperCamelCase )
# Deepspeed's `engine.step` performs the following operations:
# - gradient accumulation check
# - gradient clipping
# - optimizer step
# - zero grad
# - checking overflow
# - lr_scheduler step (only if engine.lr_scheduler is not None)
self.engine.step()
# and this plugin overrides the above calls with no-ops when Accelerate runs under
# Deepspeed, but allows normal functionality for non-Deepspeed cases thus enabling a simple
# training loop that works transparently under many training regimes.
class a_ ( _UpperCAmelCase ):
def __init__( self : List[str] , __UpperCamelCase : Tuple ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(__UpperCamelCase , device_placement=__UpperCamelCase , scaler=__UpperCamelCase )
_UpperCAmelCase = hasattr(self.optimizer , """overflow""" )
def _snake_case ( self : Dict , __UpperCamelCase : str=None ) ->Optional[int]:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
def _snake_case ( self : Optional[Any] ) ->List[str]:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
@property
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
if self.__has_overflow__:
return self.optimizer.overflow
return False
class a_ ( _UpperCAmelCase ):
def __init__( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] ) ->List[Any]:
'''simple docstring'''
super().__init__(__UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
pass # `accelerator.backward(loss)` is doing that automatically. Therefore, its implementation is not needed
class a_ :
def __init__( self : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : int=0.0_0_1 , __UpperCamelCase : Dict=0 , **__UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = params
_UpperCAmelCase = lr
_UpperCAmelCase = weight_decay
_UpperCAmelCase = kwargs
class a_ :
def __init__( self : str , __UpperCamelCase : int , __UpperCamelCase : Tuple=None , __UpperCamelCase : List[Any]=0 , **__UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = optimizer
_UpperCAmelCase = total_num_steps
_UpperCAmelCase = warmup_num_steps
        _UpperCAmelCase = kwargs
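# --- Hedged note (added comment, not part of the original dataset row) ---
# The two classes above are placeholder optimizer/scheduler shells: they only
# record constructor arguments, on the assumption that the real optimizer and
# scheduler are built later from the DeepSpeed config rather than from them.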
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a : str = True
except (ImportError, ModuleNotFoundError):
a : List[str] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
    _UpperCAmelCase = re.sub("""<n>""" , """""" , _A ) # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(_UpperCAmelCase ) )
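# --- Hedged usage example (added, not part of the original dataset row) ---
# Assuming nltk and its "punkt" model are available, the helper above turns a
# flat summary string into one sentence per line:
if NLTK_AVAILABLE:
    _UpperCAmelCase = _UpperCamelCase("First sentence. Second sentence.")
    assert _UpperCAmelCase == "First sentence.\nSecond sentence."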
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
'''simple docstring'''
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def _snake_case ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=None ) ->List[str]:
'''simple docstring'''
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(r"""<s_(.*?)>""" , __UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(rf"""</s_{key}>""" , __UpperCamelCase , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
_UpperCAmelCase = tokens.replace(__UpperCamelCase , """""" )
else:
_UpperCAmelCase = end_token.group()
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __UpperCamelCase , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_UpperCAmelCase = self.tokenajson(__UpperCamelCase , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if value:
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(r"""<sep/>""" ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCamelCase )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
_UpperCAmelCase = tokens[tokens.find(__UpperCamelCase ) + len(__UpperCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if len(__UpperCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
        return self.image_processor
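# --- Hedged sketch (added example, not part of the original dataset row) ---
# The token-to-JSON parser above walks <s_key> ... </s_key> spans. A minimal
# standalone illustration of the same idea for flat, non-nested input (the
# helper name is hypothetical):
def _toy_token_to_json(tokens):
    _UpperCAmelCase = {}
    for match in re.finditer(r"<s_(.*?)>(.*?)</s_\1>", tokens):
        _UpperCAmelCase[match.group(1)] = match.group(2).strip()
    return _UpperCAmelCase
assert _toy_token_to_json("<s_menu> donut </s_menu>") == {"menu": "donut"}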
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a_ :
a : List[Any] = PegasusConfig
a : Dict = {}
a : List[Any] = 'gelu'
def __init__( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any=False , __UpperCamelCase : Any=99 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[Any]=20 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Tuple=0 , ) ->int:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _snake_case ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _UpperCamelCase ( _A , _A , _A , _A=None , _A=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(_A , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
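# --- Hedged illustration (added example, not part of the original dataset row) ---
# The helper above always marks the first decoder position as attended, since it
# holds the decoder_start token even when that token equals pad_token_id. A tiny
# numpy check of the same construction (pad id assumed to be 0 here):
_toy_ids = np.array([[0, 5, 0]])
_toy_mask = np.concatenate(
    [np.ones(_toy_ids[:, :1].shape, dtype=np.int8), np.not_equal(_toy_ids[:, 1:], 0).astype(np.int8)], axis=-1
)
assert _toy_mask.tolist() == [[1, 1, 0]]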
@require_flax
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a : Any = True
a : int = False
a : Union[str, Any] = False
a : Optional[int] = False
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : List[Any] , __UpperCamelCase : str=None , **__UpperCamelCase : int ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=5_12 , padding=__UpperCamelCase )
_UpperCAmelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
        assert tgt_text == decoded
"""simple docstring"""
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
    if _A < 0:
        raise ValueError("""Input value must be a positive integer""" )
    elif isinstance(_A , float ):
raise TypeError("""Input value must be a 'int' type""" )
return bin(_A ).count("""1""" )
if __name__ == "__main__":
import doctest
    doctest.testmod()
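# --- Hedged usage example (added, not part of the original dataset row) ---
# bin(25) == "0b11001", which contains three set bits:
assert _UpperCamelCase(25) == 3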
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : List[str]=7 , __UpperCamelCase : List[Any]=9 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : int=False , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : List[Any]=8 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=0.0_0_2 , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = encoder_seq_length
_UpperCAmelCase = decoder_seq_length
# For common tests
_UpperCAmelCase = self.decoder_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = d_ff
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = None
_UpperCAmelCase = decoder_layers
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : str=None , ) ->int:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
_UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
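        # Hedged illustration (added comment, not part of the original dataset row):
        # with pad_token_id == 0, clamp(1) maps e.g. tensor([0, 3, 0]) to
        # tensor([1, 3, 1]), so no pad ids remain inside the sampled sequences.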
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_UpperCAmelCase ,_UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = model(__UpperCamelCase )["""last_hidden_state"""]
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_UpperCAmelCase = model(**__UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a : Any = True
a : Optional[int] = False
a : Any = False
a : Optional[int] = True
a : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a : int = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
        for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_UpperCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_UpperCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
            [ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
            [ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids.to(__UpperCamelCase ) )
_UpperCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
"""simple docstring"""
import gc
import unittest
import torch
from parameterized import parameterized
from diffusers import AutoencoderKL
from diffusers.utils import floats_tensor, load_hf_numpy, require_torch_gpu, slow, torch_all_close, torch_device
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
enable_full_determinism()
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Any = AutoencoderKL
a : Dict = 'sample'
a : List[str] = 1e-2
@property
def _snake_case ( self : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
return {"sample": image}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
return (3, 32, 32)
@property
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
return (3, 32, 32)
def _snake_case ( self : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = {
"""block_out_channels""": [32, 64],
"""in_channels""": 3,
"""out_channels""": 3,
"""down_block_types""": ["""DownEncoderBlock2D""", """DownEncoderBlock2D"""],
"""up_block_types""": ["""UpDecoderBlock2D""", """UpDecoderBlock2D"""],
"""latent_channels""": 4,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
pass
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skipIf(torch_device == """mps""" , """Gradient checkpointing skipped on MPS""" )
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_init_args_and_inputs_for_common()
_UpperCAmelCase = self.model_class(**__UpperCamelCase )
model.to(__UpperCamelCase )
assert not model.is_gradient_checkpointing and model.training
_UpperCAmelCase = model(**__UpperCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model.zero_grad()
_UpperCAmelCase = torch.randn_like(__UpperCamelCase )
_UpperCAmelCase = (out - labels).mean()
loss.backward()
# re-instantiate the model now enabling gradient checkpointing
_UpperCAmelCase = self.model_class(**__UpperCamelCase )
# clone model
model_a.load_state_dict(model.state_dict() )
model_a.to(__UpperCamelCase )
model_a.enable_gradient_checkpointing()
assert model_a.is_gradient_checkpointing and model_a.training
_UpperCAmelCase = model_a(**__UpperCamelCase ).sample
# run the backwards pass on the model. For backwards pass, for simplicity purpose,
# we won't calculate the loss and rather backprop on out.sum()
model_a.zero_grad()
_UpperCAmelCase = (out_a - labels).mean()
loss_a.backward()
# compare the output and parameters gradients
self.assertTrue((loss - loss_a).abs() < 1e-5 )
_UpperCAmelCase = dict(model.named_parameters() )
_UpperCAmelCase = dict(model_a.named_parameters() )
for name, param in named_params.items():
self.assertTrue(torch_all_close(param.grad.data , named_params_a[name].grad.data , atol=5e-5 ) )
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input )
assert image is not None, "Make sure output is not None"
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = AutoencoderKL.from_pretrained("""fusing/autoencoder-kl-dummy""" )
_UpperCAmelCase = model.to(__UpperCamelCase )
model.eval()
if torch_device == "mps":
_UpperCAmelCase = torch.manual_seed(0 )
else:
_UpperCAmelCase = torch.Generator(device=__UpperCamelCase ).manual_seed(0 )
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = image.to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , sample_posterior=__UpperCamelCase , generator=__UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# Since the VAE Gaussian prior's generator is seeded on the appropriate device,
# the expected output slices are not the same for CPU and GPU.
if torch_device == "mps":
_UpperCAmelCase = torch.tensor(
[
-4.0078e-01,
-3.8323e-04,
-1.2681e-01,
-1.1462e-01,
2.0095e-01,
1.0893e-01,
-8.8247e-02,
-3.0361e-01,
-9.8644e-03,
] )
elif torch_device == "cpu":
_UpperCAmelCase = torch.tensor(
[-0.1_3_5_2, 0.0_8_7_8, 0.0_4_1_9, -0.0_8_1_8, -0.1_0_6_9, 0.0_6_8_8, -0.1_4_5_8, -0.4_4_4_6, -0.0_0_2_6] )
else:
_UpperCAmelCase = torch.tensor(
[-0.2_4_2_1, 0.4_6_4_2, 0.2_5_0_7, -0.0_4_3_8, 0.0_6_8_2, 0.3_1_6_0, -0.2_0_1_8, -0.0_7_2_7, 0.2_4_8_5] )
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-2 ) )
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : int , __UpperCamelCase : List[str] , __UpperCamelCase : str ) ->str:
'''simple docstring'''
return f"""gaussian_noise_s={seed}_shape={"_".join([str(__UpperCamelCase ) for s in shape] )}.npy"""
def _snake_case ( self : List[str] ) ->str:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Tuple , __UpperCamelCase : Tuple=0 , __UpperCamelCase : List[Any]=(4, 3, 5_12, 5_12) , __UpperCamelCase : Optional[Any]=False ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = torch.floataa if fpaa else torch.floataa
_UpperCAmelCase = torch.from_numpy(load_hf_numpy(self.get_file_format(__UpperCamelCase , __UpperCamelCase ) ) ).to(__UpperCamelCase ).to(__UpperCamelCase )
return image
def _snake_case ( self : Dict , __UpperCamelCase : Optional[int]="CompVis/stable-diffusion-v1-4" , __UpperCamelCase : int=False ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = """fp16""" if fpaa else None
_UpperCAmelCase = torch.floataa if fpaa else torch.floataa
_UpperCAmelCase = AutoencoderKL.from_pretrained(
__UpperCamelCase , subfolder="""vae""" , torch_dtype=__UpperCamelCase , revision=__UpperCamelCase , )
model.to(__UpperCamelCase ).eval()
return model
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Tuple=0 ) ->Optional[int]:
'''simple docstring'''
if torch_device == "mps":
return torch.manual_seed(__UpperCamelCase )
return torch.Generator(device=__UpperCamelCase ).manual_seed(__UpperCamelCase )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_3, 0.9_8_7_8, -0.0_4_9_5, -0.0_7_9_0, -0.2_7_0_9, 0.8_3_7_5, -0.2_0_6_0, -0.0_8_2_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_6, 0.1_1_6_8, 0.1_3_3_2, -0.4_8_4_0, -0.2_5_0_8, -0.0_7_9_1, -0.0_4_9_3, -0.4_0_8_9], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Dict , __UpperCamelCase : Any , __UpperCamelCase : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(__UpperCamelCase )
_UpperCAmelCase = self.get_generator(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , generator=__UpperCamelCase , sample_posterior=__UpperCamelCase ).sample
assert sample.shape == image.shape
_UpperCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCAmelCase = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[33, [-0.0_5_1_3, 0.0_2_8_9, 1.3_7_9_9, 0.2_1_6_6, -0.2_5_7_3, -0.0_8_7_1, 0.5_1_0_3, -0.0_9_9_9]],
[47, [-0.4_1_2_8, -0.1_3_2_0, -0.3_7_0_4, 0.1_9_6_5, -0.4_1_1_6, -0.2_3_3_2, -0.3_3_4_0, 0.2_2_4_7]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.get_sd_vae_model(fpaa=__UpperCamelCase )
_UpperCAmelCase = self.get_sd_image(__UpperCamelCase , fpaa=__UpperCamelCase )
_UpperCAmelCase = self.get_generator(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , generator=__UpperCamelCase , sample_posterior=__UpperCamelCase ).sample
assert sample.shape == image.shape
_UpperCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.1_6_0_9, 0.9_8_6_6, -0.0_4_8_7, -0.0_7_7_7, -0.2_7_1_6, 0.8_3_6_8, -0.2_0_5_5, -0.0_8_1_4], [-0.2_3_9_5, 0.0_0_9_8, 0.0_1_0_2, -0.0_7_0_9, -0.2_8_4_0, -0.0_2_7_4, -0.0_7_1_8, -0.1_8_2_4]],
[47, [-0.2_3_7_7, 0.1_1_4_7, 0.1_3_3_3, -0.4_8_4_1, -0.2_5_0_6, -0.0_8_0_5, -0.0_4_9_1, -0.4_0_8_5], [0.0_3_5_0, 0.0_8_4_7, 0.0_4_6_7, 0.0_3_4_4, -0.0_8_4_2, -0.0_5_4_7, -0.0_6_3_3, -0.1_1_3_1]],
# fmt: on
] )
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase ).sample
assert sample.shape == image.shape
_UpperCAmelCase = sample[-1, -2:, -2:, :2].flatten().float().cpu()
_UpperCAmelCase = torch.tensor(expected_slice_mps if torch_device == """mps""" else expected_slice )
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=3e-3 )
@parameterized.expand(
[
# fmt: off
[13, [-0.2_0_5_1, -0.1_8_0_3, -0.2_3_1_1, -0.2_1_1_4, -0.3_2_9_2, -0.3_5_7_4, -0.2_9_5_3, -0.3_3_2_3]],
[37, [-0.2_6_3_2, -0.2_6_2_5, -0.2_1_9_9, -0.2_7_4_1, -0.4_5_3_9, -0.4_9_9_0, -0.3_7_2_0, -0.4_9_2_5]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : List[str] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(__UpperCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_UpperCAmelCase = model.decode(__UpperCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
_UpperCAmelCase = sample[-1, -2:, :2, -2:].flatten().cpu()
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=1e-3 )
@parameterized.expand(
[
# fmt: off
[27, [-0.0_3_6_9, 0.0_2_0_7, -0.0_7_7_6, -0.0_6_8_2, -0.1_7_4_7, -0.1_9_3_0, -0.1_4_6_5, -0.2_0_3_9]],
[16, [-0.1_6_2_8, -0.2_1_3_4, -0.2_7_4_7, -0.2_6_4_2, -0.3_7_7_4, -0.4_4_0_4, -0.3_6_8_7, -0.4_2_7_7]],
# fmt: on
] )
@require_torch_gpu
def _snake_case ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.get_sd_vae_model(fpaa=__UpperCamelCase )
_UpperCAmelCase = self.get_sd_image(__UpperCamelCase , shape=(3, 4, 64, 64) , fpaa=__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model.decode(__UpperCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
_UpperCAmelCase = sample[-1, -2:, :2, -2:].flatten().float().cpu()
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=5e-3 )
@parameterized.expand([(13,), (16,), (27,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.get_sd_vae_model(fpaa=__UpperCamelCase )
_UpperCAmelCase = self.get_sd_image(__UpperCamelCase , shape=(3, 4, 64, 64) , fpaa=__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model.decode(__UpperCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCAmelCase = model.decode(__UpperCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=1e-1 )
@parameterized.expand([(13,), (16,), (37,)] )
@require_torch_gpu
@unittest.skipIf(not is_xformers_available() , reason="""xformers is not required when using PyTorch 2.0.""" )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(__UpperCamelCase , shape=(3, 4, 64, 64) )
with torch.no_grad():
_UpperCAmelCase = model.decode(__UpperCamelCase ).sample
model.enable_xformers_memory_efficient_attention()
with torch.no_grad():
_UpperCAmelCase = model.decode(__UpperCamelCase ).sample
assert list(sample.shape ) == [3, 3, 5_12, 5_12]
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=1e-2 )
@parameterized.expand(
[
# fmt: off
[33, [-0.3_0_0_1, 0.0_9_1_8, -2.6_9_8_4, -3.9_7_2_0, -3.2_0_9_9, -5.0_3_5_3, 1.7_3_3_8, -0.2_0_6_5, 3.4_2_6_7]],
[47, [-1.5_0_3_0, -4.3_8_7_1, -6.0_3_5_5, -9.1_1_5_7, -1.6_6_6_1, -2.7_8_5_3, 2.1_6_0_7, -5.0_8_2_3, 2.5_6_3_3]],
# fmt: on
] )
def _snake_case ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.get_sd_vae_model()
_UpperCAmelCase = self.get_sd_image(__UpperCamelCase )
_UpperCAmelCase = self.get_generator(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model.encode(__UpperCamelCase ).latent_dist
_UpperCAmelCase = dist.sample(generator=__UpperCamelCase )
assert list(sample.shape ) == [image.shape[0], 4] + [i // 8 for i in image.shape[2:]]
_UpperCAmelCase = sample[0, -1, -3:, -3:].flatten().cpu()
_UpperCAmelCase = torch.tensor(__UpperCamelCase )
_UpperCAmelCase = 3e-3 if torch_device != """mps""" else 1e-2
        assert torch_all_close(__UpperCamelCase , __UpperCamelCase , atol=__UpperCamelCase )
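# --- Hedged note (added example, not part of the original dataset row) ---
# The shape assertion above encodes the SD VAE's 8x spatial downsampling:
# a (4, 3, 512, 512) image batch maps to (4, 4, 64, 64) latents.
_toy_image_shape = (4, 3, 512, 512)
_toy_latent_shape = [_toy_image_shape[0], 4] + [i // 8 for i in _toy_image_shape[2:]]
assert _toy_latent_shape == [4, 4, 64, 64]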
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCamelCase ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
_UpperCAmelCase ,_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __UpperCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(_A ) if isinstance(_A , pa.Buffer ) else pa.memory_map(_A )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=_A , features=_A ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_A )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_UpperCAmelCase = os.path.join(_A , """test.arrow""" )
with ArrowWriter(path=_A , schema=pa.schema(_A ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(_A , 1 )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if pa.types.is_list(_A ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
if isinstance(lst[0] , _A ):
change_first_primitive_element_in_list(lst[0] , _A )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_A , format="""png""" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_A , features=Features({"""image""": Image()} ) , embed_local_files=_A ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _A )
with open(_A , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_A )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=_A ) as writer:
writer._build_writer(inferred_schema=_A )
        assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A ) -> None:
"""simple docstring"""
create_state_space_tree(_A , [] , 0 , [0 for i in range(len(_A ) )] )
def _UpperCamelCase ( _A , _A , _A , _A , ) -> None:
"""simple docstring"""
if index == len(_A ):
print(_A )
return
for i in range(len(_A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_UpperCAmelCase = True
create_state_space_tree(_A , _A , index + 1 , _A )
current_sequence.pop()
_UpperCAmelCase = False
a : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
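# Illustrative note (not part of the original file): for the integer input above the
# backtracking search prints permutations in depth-first order, e.g.
# [3, 1, 2, 4], [3, 1, 4, 2], [3, 2, 1, 4], ... until all 4! = 24 orderings are emitted.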
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : int ) ->Tuple:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(__UpperCamelCase , __UpperCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def _snake_case ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__UpperCamelCase ): device for device in jax.devices()}
def _snake_case ( self : Dict , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , __UpperCamelCase ) and column:
if all(
isinstance(__UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__UpperCamelCase , axis=0 )
return column
def _snake_case ( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , (str, bytes, type(__UpperCamelCase )) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
else:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(__UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__UpperCamelCase , """__array__""" ) and not isinstance(__UpperCamelCase , jax.Array ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : dict ) ->int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
_UpperCAmelCase = self._consolidate(__UpperCamelCase )
return column
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_batch(__UpperCamelCase )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
        return batch
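# Usage sketch (assumes the `datasets` library wires this formatter in through
# `Dataset.with_format`; the toy dataset below is hypothetical, not from this file):
#
#   import datasets
#   ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]]})
#   ds = ds.with_format("jax", device=str(jax.devices()[0]))
#   batch = ds[:2]["x"]  # stacked jnp array, placed on the requested device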
"""simple docstring"""
import logging
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from seqaseq_trainer import SeqaSeqTrainer
from seqaseq_training_args import SeqaSeqTrainingArguments
import transformers
from transformers import (
AutoConfig,
AutoModelForSeqaSeqLM,
AutoTokenizer,
HfArgumentParser,
MBartTokenizer,
MBartTokenizerFast,
set_seed,
)
from transformers.trainer_utils import EvaluationStrategy, is_main_process
from transformers.training_args import ParallelMode
from utils import (
SeqaSeqDataCollator,
SeqaSeqDataset,
assert_all_frozen,
build_compute_metrics_fn,
check_output_dir,
freeze_embeds,
freeze_params,
lmap,
save_json,
use_task_specific_params,
write_txt_file,
)
a : Tuple = logging.getLogger(__name__)
@dataclass
class a_ :
'''simple docstring'''
a : str = field(
metadata={'help': 'Path to pretrained model or model identifier from huggingface.co/models'} )
a : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a : Optional[str] = field(
default=_UpperCAmelCase , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether tp freeze the encoder.'} )
a : bool = field(default=_UpperCAmelCase , metadata={'help': 'Whether to freeze the embeddings.'} )
@dataclass
class a_ :
'''simple docstring'''
a : str = field(
metadata={'help': 'The input data dir. Should contain the .tsv files (or other data files) for the task.'} )
a : Optional[str] = field(
default='summarization' , metadata={'help': 'Task name, summarization (or summarization_{dataset} for pegasus) or translation'} , )
a : Optional[int] = field(
default=1024 , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a : Optional[int] = field(
default=128 , metadata={
'help': (
'The maximum total sequence length for target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for validation target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded. '
'This argument is also used to override the ``max_length`` param of ``model.generate``, which is used '
'during ``evaluate`` and ``predict``.'
)
} , )
a : Optional[int] = field(
default=142 , metadata={
'help': (
'The maximum total sequence length for test target text after tokenization. Sequences longer '
'than this will be truncated, sequences shorter will be padded.'
)
} , )
a : Optional[int] = field(default=-1 , metadata={'help': '# training examples. -1 means use all.'} )
a : Optional[int] = field(default=-1 , metadata={'help': '# validation examples. -1 means use all.'} )
a : Optional[int] = field(default=-1 , metadata={'help': '# test examples. -1 means use all.'} )
a : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Source language id for translation.'} )
a : Optional[str] = field(default=_UpperCAmelCase , metadata={'help': 'Target language id for translation.'} )
a : Optional[int] = field(default=_UpperCAmelCase , metadata={'help': '# num_beams to use for evaluation.'} )
a : bool = field(
default=_UpperCAmelCase , metadata={'help': 'If only pad tokens should be ignored. This assumes that `config.pad_token_id` is defined.'} , )
def _UpperCamelCase ( _A , _A , _A ) -> Union[str, Any]:
"""simple docstring"""
logger.info(F"""***** {split} metrics *****""" )
for key in sorted(metrics.keys() ):
logger.info(F""" {key} = {metrics[key]}""" )
save_json(_A , os.path.join(_A , F"""{split}_results.json""" ) )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, SeqaSeqTrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = parser.parse_args_into_dataclasses()
check_output_dir(_A )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO if training_args.local_rank in [-1, 0] else logging.WARN , )
logger.warning(
"""Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s""" , training_args.local_rank , training_args.device , training_args.n_gpu , bool(training_args.parallel_mode == ParallelMode.DISTRIBUTED ) , training_args.fpaa , )
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
logger.info("""Training/evaluation parameters %s""" , _A )
# Set seed
set_seed(training_args.seed )
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = AutoConfig.from_pretrained(
model_args.config_name if model_args.config_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCAmelCase = ("""encoder_layerdrop""", """decoder_layerdrop""", """dropout""", """attention_dropout""")
for p in extra_model_params:
if getattr(_A , _A , _A ):
assert hasattr(_A , _A ), F"""({config.__class__.__name__}) doesn't have a `{p}` attribute"""
setattr(_A , _A , getattr(_A , _A ) )
_UpperCAmelCase = AutoTokenizer.from_pretrained(
model_args.tokenizer_name if model_args.tokenizer_name else model_args.model_name_or_path , cache_dir=model_args.cache_dir , )
_UpperCAmelCase = AutoModelForSeqaSeqLM.from_pretrained(
model_args.model_name_or_path , from_tf=""".ckpt""" in model_args.model_name_or_path , config=_A , cache_dir=model_args.cache_dir , )
# use task specific params
use_task_specific_params(_A , data_args.task )
# set num_beams for evaluation
if data_args.eval_beams is None:
_UpperCAmelCase = model.config.num_beams
# set decoder_start_token_id for MBart
if model.config.decoder_start_token_id is None and isinstance(_A , (MBartTokenizer, MBartTokenizerFast) ):
assert (
data_args.tgt_lang is not None and data_args.src_lang is not None
), "mBart requires --tgt_lang and --src_lang"
if isinstance(_A , _A ):
_UpperCAmelCase = tokenizer.lang_code_to_id[data_args.tgt_lang]
else:
_UpperCAmelCase = tokenizer.convert_tokens_to_ids(data_args.tgt_lang )
if model_args.freeze_embeds:
freeze_embeds(_A )
if model_args.freeze_encoder:
freeze_params(model.get_encoder() )
assert_all_frozen(model.get_encoder() )
_UpperCAmelCase = SeqaSeqDataset
# Get datasets
_UpperCAmelCase = (
dataset_class(
_A , type_path="""train""" , data_dir=data_args.data_dir , n_obs=data_args.n_train , max_target_length=data_args.max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_train
else None
)
_UpperCAmelCase = (
dataset_class(
_A , type_path="""val""" , data_dir=data_args.data_dir , n_obs=data_args.n_val , max_target_length=data_args.val_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_eval or training_args.evaluation_strategy != EvaluationStrategy.NO
else None
)
_UpperCAmelCase = (
dataset_class(
_A , type_path="""test""" , data_dir=data_args.data_dir , n_obs=data_args.n_test , max_target_length=data_args.test_max_target_length , max_source_length=data_args.max_source_length , prefix=model.config.prefix or """""" , )
if training_args.do_predict
else None
)
# Initialize our Trainer
_UpperCAmelCase = (
build_compute_metrics_fn(data_args.task , _A ) if training_args.predict_with_generate else None
)
_UpperCAmelCase = SeqaSeqTrainer(
model=_A , args=_A , data_args=_A , train_dataset=_A , eval_dataset=_A , data_collator=SeqaSeqDataCollator(
_A , _A , model.config.decoder_start_token_id , training_args.tpu_num_cores ) , compute_metrics=_A , tokenizer=_A , )
_UpperCAmelCase = {}
# Training
if training_args.do_train:
logger.info("""*** Train ***""" )
_UpperCAmelCase = trainer.train(
model_path=model_args.model_name_or_path if os.path.isdir(model_args.model_name_or_path ) else None )
_UpperCAmelCase = train_result.metrics
_UpperCAmelCase = data_args.n_train
trainer.save_model() # this also saves the tokenizer
if trainer.is_world_process_zero():
handle_metrics("""train""" , _A , training_args.output_dir )
all_metrics.update(_A )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# For convenience, we also re-save the tokenizer to the same directory,
# so that you can share your model easily on huggingface.co/models =)
tokenizer.save_pretrained(training_args.output_dir )
# Evaluation
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCAmelCase = trainer.evaluate(metric_key_prefix="""val""" )
_UpperCAmelCase = data_args.n_val
_UpperCAmelCase = round(metrics["""val_loss"""] , 4 )
if trainer.is_world_process_zero():
handle_metrics("""val""" , _A , training_args.output_dir )
all_metrics.update(_A )
if training_args.do_predict:
logger.info("""*** Predict ***""" )
_UpperCAmelCase = trainer.predict(test_dataset=_A , metric_key_prefix="""test""" )
_UpperCAmelCase = test_output.metrics
_UpperCAmelCase = data_args.n_test
if trainer.is_world_process_zero():
_UpperCAmelCase = round(metrics["""test_loss"""] , 4 )
handle_metrics("""test""" , _A , training_args.output_dir )
all_metrics.update(_A )
if training_args.predict_with_generate:
_UpperCAmelCase = tokenizer.batch_decode(
test_output.predictions , skip_special_tokens=_A , clean_up_tokenization_spaces=_A )
_UpperCAmelCase = lmap(str.strip , _A )
write_txt_file(_A , os.path.join(training_args.output_dir , """test_generations.txt""" ) )
if trainer.is_world_process_zero():
save_json(_A , os.path.join(training_args.output_dir , """all_results.json""" ) )
return all_metrics
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
main()
if __name__ == "__main__":
    main()
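# Typical invocation sketch (model name and paths are illustrative, not from this file):
#
#   python finetune_trainer.py \
#     --model_name_or_path t5-small \
#     --data_dir ./cnn_dm \
#     --output_dir ./output \
#     --do_train --do_eval --predict_with_generate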
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Tuple = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class a_ ( _UpperCAmelCase ):
a : List[Any] = (DDPMScheduler,)
def _snake_case ( self : List[str] , **__UpperCamelCase : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = {
"""num_train_timesteps""": 10_00,
"""beta_start""": 0.0_0_0_1,
"""beta_end""": 0.0_2,
"""beta_schedule""": """linear""",
"""variance_type""": """fixed_small""",
"""clip_sample""": True,
}
config.update(**__UpperCamelCase )
return config
def _snake_case ( self : Any ) ->Union[str, Any]:
'''simple docstring'''
for timesteps in [1, 5, 1_00, 10_00]:
self.check_over_configs(num_train_timesteps=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for beta_start, beta_end in zip([0.0_0_0_1, 0.0_0_1, 0.0_1, 0.1] , [0.0_0_2, 0.0_2, 0.2, 2] ):
self.check_over_configs(beta_start=__UpperCamelCase , beta_end=__UpperCamelCase )
def _snake_case ( self : int ) ->Tuple:
'''simple docstring'''
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=__UpperCamelCase )
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=__UpperCamelCase )
def _snake_case ( self : int ) ->Union[str, Any]:
'''simple docstring'''
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=__UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
self.check_over_configs(thresholding=__UpperCamelCase )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=__UpperCamelCase , prediction_type=__UpperCamelCase , sample_max_value=__UpperCamelCase , )
def _snake_case ( self : int ) ->List[Any]:
'''simple docstring'''
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=__UpperCamelCase )
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for t in [0, 5_00, 9_99]:
self.check_over_forward(time_step=__UpperCamelCase )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(4_87 ) - 0.0_0_9_7_9 ) ) < 1e-5
assert torch.sum(torch.abs(scheduler._get_variance(9_99 ) - 0.0_2 ) ) < 1e-5
def _snake_case ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
_UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCAmelCase = pred_prev_sample
_UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_58.96_06 ) < 1e-2
assert abs(result_mean.item() - 0.3_3_7_2 ) < 1e-3
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config(prediction_type="""v_prediction""" )
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = self.dummy_model()
_UpperCAmelCase = self.dummy_sample_deter
_UpperCAmelCase = torch.manual_seed(0 )
for t in reversed(range(__UpperCamelCase ) ):
# 1. predict noise residual
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase )
# 2. predict previous mean of sample x_t-1
_UpperCAmelCase = scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , generator=__UpperCamelCase ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
_UpperCAmelCase = pred_prev_sample
_UpperCAmelCase = torch.sum(torch.abs(__UpperCamelCase ) )
_UpperCAmelCase = torch.mean(torch.abs(__UpperCamelCase ) )
assert abs(result_sum.item() - 2_02.02_96 ) < 1e-2
assert abs(result_mean.item() - 0.2_6_3_1 ) < 1e-3
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = [1_00, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=__UpperCamelCase )
_UpperCAmelCase = scheduler.timesteps
for i, timestep in enumerate(__UpperCamelCase ):
if i == len(__UpperCamelCase ) - 1:
_UpperCAmelCase = -1
else:
_UpperCAmelCase = timesteps[i + 1]
_UpperCAmelCase = scheduler.previous_timestep(__UpperCamelCase )
_UpperCAmelCase = prev_t.item()
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = [1_00, 87, 50, 51, 0]
with self.assertRaises(__UpperCamelCase , msg="""`custom_timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=__UpperCamelCase )
def _snake_case ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = [1_00, 87, 50, 1, 0]
_UpperCAmelCase = len(__UpperCamelCase )
with self.assertRaises(__UpperCamelCase , msg="""Can only pass one of `num_inference_steps` or `custom_timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=__UpperCamelCase , timesteps=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.scheduler_classes[0]
_UpperCAmelCase = self.get_scheduler_config()
_UpperCAmelCase = scheduler_class(**__UpperCamelCase )
_UpperCAmelCase = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            __UpperCamelCase , msg=f"""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}""" , ):
            scheduler.set_timesteps(timesteps=__UpperCamelCase )
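# Background note on the variance checks above: with the "fixed_small" setting a DDPM
# scheduler uses the posterior variance
#     var(t) = beta_t * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
# which is 0 at t=0, ~0.00979 at t=487, and approaches beta_end (0.02) at t=999 for the
# linear schedule configured in get_scheduler_config.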
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a : int = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = test_results.split(""" """ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCAmelCase = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_A ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = None
_UpperCAmelCase = False
for line in failures_short_lines.split("""\n""" ):
if re.search(R"""_ \[doctest\]""" , _A ):
_UpperCAmelCase = True
_UpperCAmelCase = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_UpperCAmelCase = line
_UpperCAmelCase = False
return failures
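# Example (hypothetical pytest summary string) for the two parsers above:
#   handle_test_results("2 failed, 98 passed in 1h23m") -> (2, 98, "1h23m")
# extract_first_line_failure maps each "_ [doctest]"-headed block in failures_short to
# the first line of its error message.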
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = title
_UpperCAmelCase = doc_test_results["""time_spent"""].split(""",""" )[0]
_UpperCAmelCase = doc_test_results["""success"""]
_UpperCAmelCase = doc_test_results["""failures"""]
_UpperCAmelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCAmelCase = doc_test_results
@property
def _snake_case ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self._time_spent]
_UpperCAmelCase = 0
for time in time_spent:
_UpperCAmelCase = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = [0, 0, time_parts[0]]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"""{int(__UpperCamelCase )}h{int(__UpperCamelCase )}m{int(__UpperCamelCase )}s"""
@property
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = 40
_UpperCAmelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(__UpperCamelCase , __UpperCamelCase )}
_UpperCAmelCase = """"""
for category, failures in category_failures.items():
if len(__UpperCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__UpperCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCamelCase )
@staticmethod
def _snake_case ( ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(__UpperCamelCase )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=__UpperCamelCase , )
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_UpperCAmelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
_UpperCAmelCase = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=__UpperCamelCase , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase = """"""
for key, value in failures.items():
_UpperCAmelCase = value[:2_00] + """ [Truncated]""" if len(__UpperCamelCase ) > 2_50 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
_UpperCAmelCase = job_name
_UpperCAmelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_UpperCAmelCase = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_UpperCAmelCase = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
_UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda __UpperCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_UpperCAmelCase = f"""*Num failures* :{len(job_result["failed"] )} \n"""
_UpperCAmelCase = job_result["""failures"""]
_UpperCAmelCase = self.get_reply_blocks(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text=__UpperCamelCase )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f"""Results for {job}""" , blocks=__UpperCamelCase , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = os.environ["""GITHUB_RUN_ID"""]
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A ).json()
_UpperCAmelCase = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , _A )
return {}
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {}
if os.path.exists(_A ):
_UpperCAmelCase = os.listdir(_A )
for file in files:
try:
with open(os.path.join(_A , _A ) , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(_A , _A )}.""" ) from e
return _artifact
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
class a_ :
def __init__( self : List[Any] , __UpperCamelCase : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = name
_UpperCAmelCase = []
def __str__( self : int ) ->Optional[Any]:
'''simple docstring'''
return self.name
def _snake_case ( self : Dict , __UpperCamelCase : str ) ->int:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
_UpperCAmelCase = {}
_UpperCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_UpperCAmelCase = directory
if artifact_name not in _available_artifacts:
_UpperCAmelCase = Artifact(_A )
_available_artifacts[artifact_name].add_path(_A )
return _available_artifacts
if __name__ == "__main__":
a : Dict = get_job_links()
a : Dict = retrieve_available_artifacts()
a : Optional[int] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a : Dict = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a : int = github_actions_job_links.get('''run_doctests''')
a : Tuple = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
a : Optional[Any] = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
a , a , a : str = handle_test_results(artifact['''stats'''])
a : Tuple = failed
a : int = success
a : Any = time_spent[1:-1] + ''', '''
a : Dict = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
a : List[Any] = line.replace('''FAILED ''', '''''')
a : Tuple = line.split()[0].replace('''\n''', '''''')
if "::" in line:
a , a : Union[str, Any] = line.split('''::''')
else:
a , a : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a : Optional[Any] = all_failures[test] if test in all_failures else '''N/A'''
a : List[str] = failure
break
a : List[Any] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
    message.post_reply()
import warnings
from ...utils import logging
from .image_processing_deit import DeiTImageProcessor
a : Any = logging.get_logger(__name__)
class a_ ( _UpperCAmelCase ):
def __init__( self : Optional[int] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[int] ) ->None:
'''simple docstring'''
warnings.warn(
"""The class DeiTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use DeiTImageProcessor instead.""" , __UpperCamelCase , )
        super().__init__(*__UpperCamelCase , **__UpperCamelCase )
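# Migration sketch implied by the deprecation warning above (the checkpoint name is
# illustrative): code that previously did
#   feature_extractor = DeiTFeatureExtractor.from_pretrained("facebook/deit-base-distilled-patch16-224")
# should switch to the drop-in replacement
#   image_processor = DeiTImageProcessor.from_pretrained("facebook/deit-base-distilled-patch16-224")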
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase ( _A , _A=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def _UpperCamelCase ( *_A ) -> Dict:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("""test""" ):
for decorator in decorators:
_UpperCAmelCase = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
class a_ ( _UpperCAmelCase ):
pass
class a_ ( _UpperCAmelCase ):
a : Any = 0
a : Optional[Any] = 1
a : int = 2
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
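# Usage sketch (assuming this context manager is exported as `offline`, as in the
# datasets test utilities; the body is hypothetical):
#
#   with offline(OfflineSimulationMode.CONNECTION_TIMES_OUT, timeout=0.1):
#       ...  # any requests.Session.request call now fails fast with a timeout error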
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(_A )
yield
finally:
os.chdir(_A )
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(_A )
if not quiet:
print(_A , _A , file=_A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , _A , 0 , re.M )
return int(_A )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
    return port + uniq_delta
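# A minimal, self-contained sketch of the cwd-swapping context manager defined
# above, written with readable names since the snippet's identifiers are
# obfuscated. The original working directory is always restored, even if the
# block raises.
import os
import tempfile
from contextlib import contextmanager

@contextmanager
def tmp_dir_cwd():
    original_cwd = os.getcwd()
    with tempfile.TemporaryDirectory() as tmp_dir:
        try:
            os.chdir(tmp_dir)  # run the block inside the throwaway directory
            yield tmp_dir
        finally:
            os.chdir(original_cwd)  # restore no matter what happened

# Example: scratch files never pollute the repository checkout.
# with tmp_dir_cwd():
#     open("scratch.txt", "w").write("hello")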
"""simple docstring"""
import fire
from utils import calculate_rouge, save_json
def _UpperCamelCase ( _A , _A , _A=None , **_A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = [x.strip() for x in open(_A ).readlines()]
_UpperCAmelCase = [x.strip() for x in open(_A ).readlines()][: len(_A )]
_UpperCAmelCase = calculate_rouge(_A , _A , **_A )
if save_path is not None:
save_json(_A , _A , indent=_A )
return metrics # these print nicely
if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
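# Hedged CLI sketch: `fire` exposes the function's parameters as command-line
# arguments. The script and flag names below are assumptions, since the
# snippet's signature is obfuscated to `_A`; only `calculate_rouge_path` is
# taken from the `fire.Fire` call above.
#
#   python rouge_cli.py preds.txt refs.txt --save_path metrics.json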
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( _UpperCAmelCase ):
a : List[Any] = ''
a : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Tuple , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[str] = None , **__UpperCamelCase : Any , ) ->Any:
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def _snake_case ( self : List[str] ) ->List[str]:
'''simple docstring'''
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str = "rb" , **__UpperCamelCase : Any , ) ->List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
_UpperCAmelCase = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _snake_case ( self : int , __UpperCamelCase : int , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=False , **__UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip("""/""" ) )
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip("""/""" ) )
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
            return sorted(f["""name"""] for f in out )
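# Hypothetical usage of the legacy repo filesystem above (`HfLegacyFileSystem`
# is an assumed alias for the obfuscated class name; the flow mirrors other
# fsspec implementations):
#
# from huggingface_hub import HfApi
# info = HfApi().dataset_info("squad")
# fs = HfLegacyFileSystem(repo_info=info)
# fs.ls("")                        # top-level entries in the dataset repo
# with fs.open("README.md") as f:  # streams the file via hf_hub_url
#     head = f.read(100)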
def _UpperCamelCase ( _A , _A , _A , _A ) -> bool:
"""simple docstring"""
if graph[path[curr_ind - 1]][next_ver] == 0:
return False
# 2. Validate that next vertex is not already in path
return not any(vertex == next_ver for vertex in path )
def _UpperCamelCase ( _A , _A , _A ) -> bool:
"""simple docstring"""
if curr_ind == len(_A ):
# return whether path exists between current and starting vertices
return graph[path[curr_ind - 1]][path[0]] == 1
# Recursive Step
for next_ver in range(0 , len(_A ) ):
if valid_connection(_A , _A , _A , _A ):
# Insert current vertex into path as next transition
_UpperCAmelCase = next_ver
# Validate created path
if util_hamilton_cycle(_A , _A , curr_ind + 1 ):
return True
# Backtrack
_UpperCAmelCase = -1
return False
def _UpperCamelCase ( _A , _A = 0 ) -> list[int]:
"""simple docstring"""
_UpperCAmelCase = [-1] * (len(_A ) + 1)
# initialize start and end of path with starting index
_UpperCAmelCase = _UpperCAmelCase = start_index
# evaluate and if we find answer return path either return empty array
    return path if util_hamilton_cycle(_A , _A , 1 ) else []
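# Worked example for the backtracking search above. `hamilton_cycle` stands in
# for the obfuscated entry-point function. The 5-vertex graph below contains
# the Hamiltonian cycle 0-1-2-4-3-0, so a closed path of length 6 is returned.
graph = [
    [0, 1, 0, 1, 0],
    [1, 0, 1, 1, 1],
    [0, 1, 0, 0, 1],
    [1, 1, 0, 0, 1],
    [0, 1, 1, 1, 0],
]
# hamilton_cycle(graph)  # -> [0, 1, 2, 4, 3, 0]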
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Any = '''
Calculates how good predictions are given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
        of strings with several code candidates to solve the problem.
    references: a list with a test for each prediction. Each test should evaluate the
        correctness of a code candidate.
    k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time (in seconds) each candidate is allowed to run (Default: 3.0)
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : List[Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=[1, 10, 1_00] , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple=3.0 ) ->Union[str, Any]:
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase ,_UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
def estimator(_A , _A , _A ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_A , _A ):
_UpperCAmelCase = itertools.repeat(_A , len(_A ) )
else:
assert len(_A ) == len(_A )
_UpperCAmelCase = iter(_A )
    return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] )
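# Worked example of the numerically stable, unbiased pass@k estimator above:
#   pass@k = 1 - C(n - c, k) / C(n, k) = 1 - prod_{i = n-c+1 .. n} (1 - k / i)
import numpy as np

n, c, k = 4, 2, 2  # 4 samples drawn, 2 passed the tests, evaluate pass@2
estimate = 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
print(round(float(estimate), 4))  # 0.8333, i.e. 1 - C(2,2)/C(4,2) = 1 - 1/6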
from typing import List
from .keymap import KEYMAP, get_character
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
def decorator(_A ):
_UpperCAmelCase = getattr(_A , """handle_key""" , [] )
handle += [key]
setattr(_A , """handle_key""" , _A )
return func
return decorator
def _UpperCamelCase ( *_A ) -> Optional[int]:
"""simple docstring"""
def decorator(_A ):
_UpperCAmelCase = getattr(_A , """handle_key""" , [] )
handle += keys
setattr(_A , """handle_key""" , _A )
return func
return decorator
class a_ ( _UpperCAmelCase ):
def __new__( cls : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = super().__new__(cls , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
if not hasattr(__UpperCamelCase , """key_handler""" ):
setattr(__UpperCamelCase , """key_handler""" , {} )
setattr(__UpperCamelCase , """handle_input""" , KeyHandler.handle_input )
for value in attrs.values():
_UpperCAmelCase = getattr(__UpperCamelCase , """handle_key""" , [] )
for key in handled_keys:
_UpperCAmelCase = value
return new_cls
@staticmethod
def _snake_case ( cls : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = get_character()
if char != KEYMAP["undefined"]:
_UpperCAmelCase = ord(__UpperCamelCase )
_UpperCAmelCase = cls.key_handler.get(__UpperCamelCase )
if handler:
_UpperCAmelCase = char
return handler(cls )
else:
return None
def _UpperCamelCase ( cls ) -> Optional[Any]:
"""simple docstring"""
    return KeyHandler(cls.__name__ , cls.__bases__ , cls.__dict__.copy() )
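# Hypothetical usage of the key-dispatch metaclass above. `mark` and
# `KeyHandler` are assumed readable aliases for the obfuscated decorator and
# metaclass; `handle_input` blocks for one keypress and dispatches to the
# handler registered for that key, returning None for unhandled keys.
#
# class Menu(metaclass=KeyHandler):
#     @mark("q")
#     def quit(cls):
#         return "bye"
#
# Menu.handle_input()  # pressing "q" returns "bye"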
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
    doctest.testmod()
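# A self-contained check of the explicit-Heun integrator above, rewritten with
# readable names (`explicit_heun` is our alias; the snippet's function name is
# obfuscated). Integrating y' = y from x = 0 to 1 with y(0) = 1 should
# approach e ~ 2.71828.
import numpy as np

def explicit_heun(ode_func, y0, x0, step_size, x_end):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros((n + 1,))
    y[0], x = y0, x0
    for k in range(n):
        predictor = y[k] + step_size * ode_func(x, y[k])  # forward-Euler guess
        # trapezoidal corrector: average the slopes at both ends of the step
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, predictor)
        )
        x += step_size
    return y

print(explicit_heun(lambda x, y: y, 1.0, 0.0, 0.001, 1.0)[-1])  # ~2.71828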
"""simple docstring"""
import numpy as np
a : Union[str, Any] = [
['''a''', '''b''', '''c''', '''d''', '''e'''],
['''f''', '''g''', '''h''', '''i''', '''k'''],
['''l''', '''m''', '''n''', '''o''', '''p'''],
['''q''', '''r''', '''s''', '''t''', '''u'''],
['''v''', '''w''', '''x''', '''y''', '''z'''],
]
class a_ :
def __init__( self : Any ) ->None:
'''simple docstring'''
_UpperCAmelCase = np.array(__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : str ) ->np.ndarray:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = np.where(letter == self.SQUARE )
_UpperCAmelCase = np.concatenate([indexa + 1, indexa + 1] )
return indexes
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : int ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.SQUARE[indexa - 1, indexa - 1]
return letter
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = message.lower()
_UpperCAmelCase = message.replace(""" """ , """""" )
_UpperCAmelCase = message.replace("""j""" , """i""" )
_UpperCAmelCase = np.empty((2, len(__UpperCamelCase )) )
for letter_index in range(len(__UpperCamelCase ) ):
_UpperCAmelCase = self.letter_to_numbers(message[letter_index] )
_UpperCAmelCase = numbers[0]
_UpperCAmelCase = numbers[1]
_UpperCAmelCase = first_step.reshape(2 * len(__UpperCamelCase ) )
_UpperCAmelCase = """"""
for numbers_index in range(len(__UpperCamelCase ) ):
_UpperCAmelCase = int(second_step[numbers_index * 2] )
_UpperCAmelCase = int(second_step[(numbers_index * 2) + 1] )
_UpperCAmelCase = self.numbers_to_letter(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = encoded_message + letter
return encoded_message
def _snake_case ( self : List[Any] , __UpperCamelCase : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = message.lower()
        _UpperCAmelCase = message.replace(""" """ , """""" )  # keep the result; a bare call would discard it
_UpperCAmelCase = np.empty(2 * len(__UpperCamelCase ) )
for letter_index in range(len(__UpperCamelCase ) ):
_UpperCAmelCase = self.letter_to_numbers(message[letter_index] )
_UpperCAmelCase = numbers[0]
_UpperCAmelCase = numbers[1]
_UpperCAmelCase = first_step.reshape((2, len(__UpperCamelCase )) )
_UpperCAmelCase = """"""
for numbers_index in range(len(__UpperCamelCase ) ):
_UpperCAmelCase = int(second_step[0, numbers_index] )
_UpperCAmelCase = int(second_step[1, numbers_index] )
_UpperCAmelCase = self.numbers_to_letter(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = decoded_message + letter
        return decoded_message
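# Hypothetical round trip with the cipher class above (`PolybiusSquareCipher`
# is an assumed alias for the obfuscated class name). Note the pre-processing:
# input is lower-cased, spaces are removed, and "j" is folded into "i" before
# the two-step coordinate shuffle.
#
# cipher = PolybiusSquareCipher()
# secret = cipher.encode("test message")
# cipher.decode(secret)  # -> "testmessage"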
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
    main()
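# Hedged CLI sketch; the flag names come directly from the argparse
# definitions above, while the script name and values are illustrative:
#
#   python run_onnx_exporter.py \
#       --model_name_or_path facebook/bart-base \
#       --max_length 5 --num_beams 4 \
#       --device cpu --output_file_path BART.onnx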
"""simple docstring"""
import os
import re
import shutil
import sys
import tempfile
import unittest
import black
a : int = os.path.abspath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
sys.path.append(os.path.join(git_repo_path, '''utils'''))
import check_copies # noqa: E402
# This is the reference code that will be used in the tests.
# If DDPMSchedulerOutput is changed in scheduling_ddpm.py, this code needs to be manually updated.
a : List[str] = ''' \"""
Output class for the scheduler\'s step function output.
Args:
prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the
denoising loop.
pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
The predicted denoised sample (x_{0}) based on the model output from the current timestep.
`pred_original_sample` can be used to preview progress or for guidance.
\"""
prev_sample: torch.FloatTensor
pred_original_sample: Optional[torch.FloatTensor] = None
'''
class a_ ( unittest.TestCase ):
def _snake_case ( self : Optional[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = tempfile.mkdtemp()
os.makedirs(os.path.join(self.diffusers_dir , """schedulers/""" ) )
_UpperCAmelCase = self.diffusers_dir
shutil.copy(
os.path.join(__UpperCamelCase , """src/diffusers/schedulers/scheduling_ddpm.py""" ) , os.path.join(self.diffusers_dir , """schedulers/scheduling_ddpm.py""" ) , )
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = """src/diffusers"""
shutil.rmtree(self.diffusers_dir )
def _snake_case ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any]=None ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = comment + f"""\nclass {class_name}(nn.Module):\n""" + class_code
if overwrite_result is not None:
_UpperCAmelCase = comment + f"""\nclass {class_name}(nn.Module):\n""" + overwrite_result
        _UpperCAmelCase = black.Mode(target_versions={black.TargetVersion.PY37} , line_length=1_19 )
_UpperCAmelCase = black.format_str(__UpperCamelCase , mode=__UpperCamelCase )
_UpperCAmelCase = os.path.join(self.diffusers_dir , """new_code.py""" )
with open(__UpperCamelCase , """w""" , newline="""\n""" ) as f:
f.write(__UpperCamelCase )
if overwrite_result is None:
self.assertTrue(len(check_copies.is_copy_consistent(__UpperCamelCase ) ) == 0 )
else:
check_copies.is_copy_consistent(f.name , overwrite=__UpperCamelCase )
with open(__UpperCamelCase , """r""" ) as f:
self.assertTrue(f.read() , __UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = check_copies.find_code_in_diffusers("""schedulers.scheduling_ddpm.DDPMSchedulerOutput""" )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , REFERENCE_CODE + """\n""" , )
# With no empty line at the end
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput""" , """DDPMSchedulerOutput""" , __UpperCamelCase , )
# Copy consistency with rename
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , re.sub("""DDPM""" , """Test""" , __UpperCamelCase ) , )
# Copy consistency with a really long name
_UpperCAmelCase = """TestClassWithAReallyLongNameBecauseSomePeopleLikeThatForSomeReason"""
self.check_copy_consistency(
f"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->{long_class_name}""" , f"""{long_class_name}SchedulerOutput""" , re.sub("""Bert""" , __UpperCamelCase , __UpperCamelCase ) , )
# Copy consistency with overwrite
self.check_copy_consistency(
"""# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->Test""" , """TestSchedulerOutput""" , __UpperCamelCase , overwrite_result=re.sub("""DDPM""" , """Test""" , __UpperCamelCase ) , ) | 701 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = requests.get(_A , headers=_A , allow_redirects=_A )
_UpperCAmelCase = result.headers["""Location"""]
_UpperCAmelCase = requests.get(_A , allow_redirects=_A )
_UpperCAmelCase = os.path.join(_A , F"""{artifact_name}.zip""" )
with open(_A , """wb""" ) as fp:
fp.write(response.content )
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
_UpperCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(""": """ )]
_UpperCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase = line[len("""FAILED """ ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
_UpperCAmelCase = line
if len(_A ) != len(_A ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` """
F"""and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
_UpperCAmelCase = None
if job_name and job_links:
_UpperCAmelCase = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_UpperCAmelCase = test.split("""/""" )[2]
else:
_UpperCAmelCase = None
return test
def _UpperCamelCase ( _A , _A=None ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase = [x for x in logs if x[2] is not None]
_UpperCAmelCase = {x[2] for x in logs}
_UpperCAmelCase = {}
for test in tests:
_UpperCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase = {"""count""": n_errors, """errors""": error_counts}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """| no. | error | status |"""
_UpperCAmelCase = """|-:|:-|:-|"""
_UpperCAmelCase = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase = reduced_by_error[error]["""count"""]
_UpperCAmelCase = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(_A )
return "\n".join(_A )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """| model | no. of errors | major error | count |"""
_UpperCAmelCase = """|-:|-:|-:|-:|"""
_UpperCAmelCase = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase = reduced_by_model[model]["""count"""]
_UpperCAmelCase ,_UpperCAmelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
_UpperCAmelCase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(_A )
return "\n".join(_A )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
a : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a : Tuple = get_job_links(args.workflow_run_id, token=args.token)
a : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a : List[Any] = k.find(''' / ''')
a : Tuple = k[index + len(''' / ''') :]
a : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a : Optional[int] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a : int = reduce_by_error(errors)
a : str = reduce_by_model(errors)
a : int = make_github_table(reduced_by_error)
a : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
        fp.write(sa)
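# Hedged CLI sketch, using the argument names declared in the parser above;
# the script name, run id, and paths are illustrative:
#
#   python get_ci_error_statistics.py \
#       --workflow_run_id 123456789 \
#       --output_dir ci_results \
#       --token "$GITHUB_TOKEN"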
"""simple docstring"""
from typing import List, Optional, Tuple, Union
import torch
from ...utils import logging, randn_tensor
from ..pipeline_utils import AudioPipelineOutput, DiffusionPipeline
a : Optional[Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
class a_ ( _UpperCAmelCase ):
def __init__( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : int ) ->Optional[int]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=__UpperCamelCase , scheduler=__UpperCamelCase )
@torch.no_grad()
def __call__( self : int , __UpperCamelCase : int = 1 , __UpperCamelCase : int = 1_00 , __UpperCamelCase : Optional[Union[torch.Generator, List[torch.Generator]]] = None , __UpperCamelCase : Optional[float] = None , __UpperCamelCase : bool = True , ) ->Union[AudioPipelineOutput, Tuple]:
'''simple docstring'''
if audio_length_in_s is None:
_UpperCAmelCase = self.unet.config.sample_size / self.unet.config.sample_rate
_UpperCAmelCase = audio_length_in_s * self.unet.config.sample_rate
_UpperCAmelCase = 2 ** len(self.unet.up_blocks )
if sample_size < 3 * down_scale_factor:
raise ValueError(
f"""{audio_length_in_s} is too small. Make sure it's bigger or equal to"""
f""" {3 * down_scale_factor / self.unet.config.sample_rate}.""" )
_UpperCAmelCase = int(__UpperCamelCase )
if sample_size % down_scale_factor != 0:
_UpperCAmelCase = (
(audio_length_in_s * self.unet.config.sample_rate) // down_scale_factor + 1
) * down_scale_factor
logger.info(
f"""{audio_length_in_s} is increased to {sample_size / self.unet.config.sample_rate} so that it can be handled"""
f""" by the model. It will be cut to {original_sample_size / self.unet.config.sample_rate} after the denoising"""
""" process.""" )
_UpperCAmelCase = int(__UpperCamelCase )
_UpperCAmelCase = next(iter(self.unet.parameters() ) ).dtype
_UpperCAmelCase = (batch_size, self.unet.config.in_channels, sample_size)
if isinstance(__UpperCamelCase , __UpperCamelCase ) and len(__UpperCamelCase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(__UpperCamelCase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase = randn_tensor(__UpperCamelCase , generator=__UpperCamelCase , device=self.device , dtype=__UpperCamelCase )
# set step values
self.scheduler.set_timesteps(__UpperCamelCase , device=audio.device )
_UpperCAmelCase = self.scheduler.timesteps.to(__UpperCamelCase )
for t in self.progress_bar(self.scheduler.timesteps ):
# 1. predict noise model_output
_UpperCAmelCase = self.unet(__UpperCamelCase , __UpperCamelCase ).sample
# 2. compute previous image: x_t -> t_t-1
_UpperCAmelCase = self.scheduler.step(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).prev_sample
_UpperCAmelCase = audio.clamp(-1 , 1 ).float().cpu().numpy()
_UpperCAmelCase = audio[:, :, :original_sample_size]
if not return_dict:
return (audio,)
        return AudioPipelineOutput(audios=__UpperCamelCase )
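# Hypothetical inference sketch for the unconditional audio pipeline above
# (`AudioDiffusionPipeline` is an assumed alias for the obfuscated class; the
# keyword arguments match the `__call__` signature defined above):
#
# pipe = AudioDiffusionPipeline(unet=unet, scheduler=scheduler)
# out = pipe(batch_size=1, num_inference_steps=100, audio_length_in_s=4.0)
# waveform = out.audios[0]  # numpy array of shape (channels, samples)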
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def _snake_case ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=None ) ->List[str]:
'''simple docstring'''
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(r"""<s_(.*?)>""" , __UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(rf"""</s_{key}>""" , __UpperCamelCase , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
_UpperCAmelCase = tokens.replace(__UpperCamelCase , """""" )
else:
_UpperCAmelCase = end_token.group()
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __UpperCamelCase , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_UpperCAmelCase = self.tokenajson(__UpperCamelCase , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if value:
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(r"""<sep/>""" ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCamelCase )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
_UpperCAmelCase = tokens[tokens.find(__UpperCamelCase ) + len(__UpperCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if len(__UpperCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
        return self.image_processor
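# Hypothetical example of the Donut-style token-to-JSON parsing implemented
# above (`processor` stands in for an instance of the obfuscated class):
#
# tokens = "<s_menu><s_name>latte</s_name><s_price>5</s_price></s_menu>"
# processor.tokenajson(tokens)
# # -> {"menu": {"name": "latte", "price": "5"}}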
"""simple docstring"""
import argparse
import pytorch_lightning as pl
import torch
from torch import nn
from transformers import LongformerForQuestionAnswering, LongformerModel
class a_ ( pl.LightningModule ):
def __init__( self : List[str] , __UpperCamelCase : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
super().__init__()
_UpperCAmelCase = model
_UpperCAmelCase = 2
_UpperCAmelCase = nn.Linear(self.model.config.hidden_size , self.num_labels )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
pass
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = LongformerModel.from_pretrained(_A )
_UpperCAmelCase = LightningModel(_A )
_UpperCAmelCase = torch.load(_A , map_location=torch.device("""cpu""" ) )
lightning_model.load_state_dict(ckpt["""state_dict"""] )
# init longformer question answering model
_UpperCAmelCase = LongformerForQuestionAnswering.from_pretrained(_A )
# transfer weights
longformer_for_qa.longformer.load_state_dict(lightning_model.model.state_dict() )
longformer_for_qa.qa_outputs.load_state_dict(lightning_model.qa_outputs.state_dict() )
longformer_for_qa.eval()
# save model
longformer_for_qa.save_pretrained(_A )
print(F"""Conversion successful. Model saved under {pytorch_dump_folder_path}""" )
if __name__ == "__main__":
a : List[str] = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--longformer_model''',
default=None,
type=str,
required=True,
help='''model identifier of longformer. Should be either `longformer-base-4096` or `longformer-large-4096`.''',
)
parser.add_argument(
'''--longformer_question_answering_ckpt_path''',
default=None,
type=str,
required=True,
help='''Path the official PyTorch Lightning Checkpoint.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Optional[int] = parser.parse_args()
convert_longformer_qa_checkpoint_to_pytorch(
args.longformer_model, args.longformer_question_answering_ckpt_path, args.pytorch_dump_folder_path
    )
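# Hedged CLI sketch using the argument names declared above; the script name
# and paths are illustrative:
#
#   python convert_longformer_qa_checkpoint.py \
#       --longformer_model longformer-base-4096 \
#       --longformer_question_answering_ckpt_path ./lightning.ckpt \
#       --pytorch_dump_folder_path ./longformer-qa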
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( _A , _A , _A ) -> float:
"""simple docstring"""
_UpperCAmelCase = x
_UpperCAmelCase = y
for step in range(_A ): # noqa: B007
_UpperCAmelCase = a * a - b * b + x
_UpperCAmelCase = 2 * a * b + y
_UpperCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(_A , 1 , 1 ) )
def _UpperCamelCase ( _A = 8_0_0 , _A = 6_0_0 , _A = -0.6 , _A = 0 , _A = 3.2 , _A = 5_0 , _A = True , ) -> Image.Image:
"""simple docstring"""
_UpperCAmelCase = Image.new("""RGB""" , (image_width, image_height) )
_UpperCAmelCase = img.load()
# loop through the image-coordinates
for image_x in range(_A ):
for image_y in range(_A ):
# determine the figure-coordinates based on the image-coordinates
_UpperCAmelCase = figure_width / image_width * image_height
_UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
_UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
_UpperCAmelCase = get_distance(_A , _A , _A )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_UpperCAmelCase = get_color_coded_rgb(_A )
else:
_UpperCAmelCase = get_black_and_white_rgb(_A )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
    img.show()
"""simple docstring"""
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = int(_A )
if decimal in (0, 1): # Exit cases for the recursion
return str(_A )
_UpperCAmelCase ,_UpperCAmelCase = divmod(_A , 2 )
return binary_recursive(_A ) + str(_A )
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(_A ).strip()
if not number:
raise ValueError("""No input value was provided""" )
_UpperCAmelCase = """-""" if number.startswith("""-""" ) else """"""
_UpperCAmelCase = number.lstrip("""-""" )
if not number.isnumeric():
raise ValueError("""Input value is not an integer""" )
return F"""{negative}0b{binary_recursive(int(_A ) )}"""
if __name__ == "__main__":
from doctest import testmod
    testmod()
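# Quick worked examples for the converter above (`binary` stands in for the
# obfuscated wrapper function; `binary_recursive` is named directly in the
# snippet):
#
# binary("10")  # -> "0b1010"
# binary("-5")  # -> "-0b101"
# binary("0")   # -> "0b0"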
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
def __init__( self : List[str] , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 88 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 32 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "geglu" , __UpperCamelCase : Optional[int] = None , ) ->Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__UpperCamelCase , attention_head_dim=__UpperCamelCase , in_channels=__UpperCamelCase , num_layers=__UpperCamelCase , dropout=__UpperCamelCase , norm_num_groups=__UpperCamelCase , cross_attention_dim=__UpperCamelCase , attention_bias=__UpperCamelCase , sample_size=__UpperCamelCase , num_vector_embeds=__UpperCamelCase , activation_fn=__UpperCamelCase , num_embeds_ada_norm=__UpperCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def _snake_case ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : bool = True , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , timestep=__UpperCamelCase , cross_attention_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
        return TransformeraDModelOutput(sample=__UpperCamelCase )
"""simple docstring"""
import math
import flax.linen as nn
import jax.numpy as jnp
def _UpperCamelCase ( _A , _A , _A = 1 , _A = 1 , _A = 1.0e4 , _A = False , _A = 1.0 , ) -> jnp.ndarray:
"""simple docstring"""
assert timesteps.ndim == 1, "Timesteps should be a 1d-array"
assert embedding_dim % 2 == 0, F"""Embedding dimension {embedding_dim} should be even"""
_UpperCAmelCase = float(embedding_dim // 2 )
_UpperCAmelCase = math.log(max_timescale / min_timescale ) / (num_timescales - freq_shift)
_UpperCAmelCase = min_timescale * jnp.exp(jnp.arange(_A , dtype=jnp.floataa ) * -log_timescale_increment )
_UpperCAmelCase = jnp.expand_dims(_A , 1 ) * jnp.expand_dims(_A , 0 )
# scale embeddings
_UpperCAmelCase = scale * emb
if flip_sin_to_cos:
_UpperCAmelCase = jnp.concatenate([jnp.cos(_A ), jnp.sin(_A )] , axis=1 )
else:
_UpperCAmelCase = jnp.concatenate([jnp.sin(_A ), jnp.cos(_A )] , axis=1 )
_UpperCAmelCase = jnp.reshape(_A , [jnp.shape(_A )[0], embedding_dim] )
return signal
class a_ ( nn.Module ):
a : int = 32
a : jnp.dtype = jnp.floataa
@nn.compact
def __call__( self : Optional[Any] , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_1""" )(__UpperCamelCase )
_UpperCAmelCase = nn.silu(__UpperCamelCase )
_UpperCAmelCase = nn.Dense(self.time_embed_dim , dtype=self.dtype , name="""linear_2""" )(__UpperCamelCase )
return temb
class a_ ( nn.Module ):
a : int = 32
a : bool = False
a : float = 1
@nn.compact
def __call__( self : Dict , __UpperCamelCase : Union[str, Any] ) ->List[str]:
'''simple docstring'''
return get_sinusoidal_embeddings(
            __UpperCamelCase , embedding_dim=self.dim , flip_sin_to_cos=self.flip_sin_to_cos , freq_shift=self.freq_shift )
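# Shape sketch for the embedding helper above. Its definition is obfuscated,
# but the call site in the second nn.Module names it
# `get_sinusoidal_embeddings`; the example values are ours.
#
# import jax.numpy as jnp
# emb = get_sinusoidal_embeddings(jnp.arange(4), embedding_dim=32)
# emb.shape  # -> (4, 32): one 32-dim sin/cos vector per timestep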
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = LxmertConfig.from_json_file(_A )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase = LxmertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_A , _A , _A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _A )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
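# Hedged CLI sketch using the argument names declared above; the script name
# and paths are illustrative:
#
#   python convert_lxmert_checkpoint.py \
#       --tf_checkpoint_path ./model.ckpt \
#       --config_file ./lxmert_config.json \
#       --pytorch_dump_path ./pytorch_model.bin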
import inspect
import tempfile
import unittest
from huggingface_hub import hf_hub_download
from transformers import is_torch_available
from transformers.testing_utils import is_flaky, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
a : int = 1E-4
if is_torch_available():
import torch
from transformers import AutoformerConfig, AutoformerForPrediction, AutoformerModel
from transformers.models.autoformer.modeling_autoformer import AutoformerDecoder, AutoformerEncoder
@require_torch
class a_ :
def __init__( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int]=16 , __UpperCamelCase : Union[str, Any]=13 , __UpperCamelCase : Optional[Any]=7 , __UpperCamelCase : Dict=14 , __UpperCamelCase : Tuple=10 , __UpperCamelCase : Any=19 , __UpperCamelCase : int=5 , __UpperCamelCase : str=4 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[int]=16 , __UpperCamelCase : str=2 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : int=4 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Union[str, Any]=[1, 2, 3, 4, 5] , __UpperCamelCase : Tuple=25 , __UpperCamelCase : List[Any]=5 , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = d_model
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = prediction_length
_UpperCAmelCase = context_length
_UpperCAmelCase = cardinality
_UpperCAmelCase = num_time_features
_UpperCAmelCase = lags_sequence
_UpperCAmelCase = embedding_dimension
_UpperCAmelCase = is_training
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = context_length
_UpperCAmelCase = prediction_length + label_length
_UpperCAmelCase = label_length
_UpperCAmelCase = moving_average
_UpperCAmelCase = autocorrelation_factor
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
return AutoformerConfig(
d_model=self.d_model , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , prediction_length=self.prediction_length , context_length=self.context_length , label_length=self.label_length , lags_sequence=self.lags_sequence , num_time_features=self.num_time_features , num_static_categorical_features=1 , cardinality=[self.cardinality] , embedding_dimension=[self.embedding_dimension] , moving_average=self.moving_average , )
def _snake_case ( self : Optional[int] , __UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = config.context_length + max(config.lags_sequence )
_UpperCAmelCase = ids_tensor([self.batch_size, 1] , config.cardinality[0] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] )
_UpperCAmelCase = floats_tensor([self.batch_size, _past_length] ) > 0.5
# decoder inputs
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length, config.num_time_features] )
_UpperCAmelCase = floats_tensor([self.batch_size, config.prediction_length] )
_UpperCAmelCase = {
"""past_values""": past_values,
"""static_categorical_features""": static_categorical_features,
"""past_time_features""": past_time_features,
"""past_observed_mask""": past_observed_mask,
"""future_time_features""": future_time_features,
"""future_values""": future_values,
}
return inputs_dict
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = self.prepare_autoformer_inputs_dict(__UpperCamelCase )
return config, inputs_dict
def _snake_case ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Tuple ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = AutoformerModel(config=__UpperCamelCase ).to(__UpperCamelCase ).eval()
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.encoder_last_hidden_state
_UpperCAmelCase = outputs.last_hidden_state
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_encoder()
encoder.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoformerEncoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = model.create_network_inputs(**__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase = model.decomposition_layer(transformer_inputs[:, : config.context_length, ...] )
_UpperCAmelCase = torch.cat(
(transformer_inputs[:, : config.context_length, ...], feature[:, : config.context_length, ...]) , dim=-1 , )
_UpperCAmelCase = encoder(inputs_embeds=__UpperCamelCase )[0]
self.parent.assertTrue((encoder_last_hidden_state_a - encoder_last_hidden_state).abs().max().item() < 1e-3 )
_UpperCAmelCase = (
torch.mean(transformer_inputs[:, : config.context_length, ...] , dim=1 )
.unsqueeze(1 )
.repeat(1 , config.prediction_length , 1 )
)
_UpperCAmelCase = torch.zeros(
[transformer_inputs.shape[0], config.prediction_length, transformer_inputs.shape[2]] , device=enc_input.device , )
_UpperCAmelCase = torch.cat(
(
torch.cat((seasonal_input[:, -config.label_length :, ...], zeros) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
_UpperCAmelCase = torch.cat(
(
torch.cat((trend_input[:, -config.label_length :, ...], mean) , dim=1 ),
feature[:, config.context_length - config.label_length :, ...],
) , dim=-1 , )
with tempfile.TemporaryDirectory() as tmpdirname:
_UpperCAmelCase = model.get_decoder()
decoder.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = AutoformerDecoder.from_pretrained(__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = decoder(
trend=__UpperCamelCase , inputs_embeds=__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , )[0]
self.parent.assertTrue((last_hidden_state_a - last_hidden_state).abs().max().item() < 1e-3 )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (AutoformerModel, AutoformerForPrediction) if is_torch_available() else ()
a : Any = (AutoformerForPrediction,) if is_torch_available() else ()
a : Optional[Any] = {'feature-extraction': AutoformerModel} if is_torch_available() else {}
a : Optional[int] = False
a : List[str] = False
a : Optional[int] = False
a : Optional[int] = False
a : List[Any] = False
a : Optional[int] = False
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = AutoformerModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase = model_class.from_pretrained(__UpperCamelCase , output_loading_info=__UpperCamelCase )
self.assertEqual(info["""missing_keys"""] , [] )
def _snake_case ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_encoder_decoder_model_standalone(*__UpperCamelCase )
@unittest.skip(reason="""Model has no tokens embeddings""" )
def _snake_case ( self : Union[str, Any] ) ->int:
'''simple docstring'''
pass
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = inspect.signature(getattr(__UpperCamelCase , """forward""" ) )
# The main input is the name of the argument after `self`
_UpperCAmelCase = list(model_signature.parameters.keys() )[1]
self.assertEqual(AutoformerModel.main_input_name , __UpperCamelCase )
def _snake_case ( self : Any ) ->str:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = [
"""past_values""",
"""past_time_features""",
"""past_observed_mask""",
"""static_categorical_features""",
"""static_real_features""",
"""future_values""",
"""future_time_features""",
]
if model.__class__.__name__ in ["AutoformerForPrediction"]:
expected_arg_names.append("""future_observed_mask""" )
expected_arg_names.extend(
[
"""decoder_attention_mask""",
"""head_mask""",
"""decoder_head_mask""",
"""cross_attn_head_mask""",
"""encoder_outputs""",
"""past_key_values""",
"""output_hidden_states""",
"""output_attentions""",
"""use_cache""",
"""return_dict""",
] )
self.assertListEqual(arg_names[: len(__UpperCamelCase )] , __UpperCamelCase )
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
_UpperCAmelCase = getattr(self.model_tester , """seq_length""" , __UpperCamelCase )
_UpperCAmelCase = getattr(self.model_tester , """decoder_seq_length""" , __UpperCamelCase )
_UpperCAmelCase = getattr(self.model_tester , """encoder_seq_length""" , __UpperCamelCase )
_UpperCAmelCase = getattr(self.model_tester , """d_model""" , __UpperCamelCase )
_UpperCAmelCase = getattr(self.model_tester , """num_attention_heads""" , __UpperCamelCase )
_UpperCAmelCase = d_model // num_attention_heads
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
_UpperCAmelCase = outputs.encoder_attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = 7
if "last_hidden_state" in outputs:
correct_outlen += 1
if "trend" in outputs:
correct_outlen += 1
if "past_key_values" in outputs:
correct_outlen += 1 # past_key_values have been returned
if "loss" in outputs:
correct_outlen += 1
if "params" in outputs:
correct_outlen += 1
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
# decoder attentions
_UpperCAmelCase = outputs.decoder_attentions
self.assertIsInstance(__UpperCamelCase , (list, tuple) )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# cross attentions
_UpperCAmelCase = outputs.cross_attentions
self.assertIsInstance(__UpperCamelCase , (list, tuple) )
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(cross_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, decoder_seq_length, dim] , )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(__UpperCamelCase , __UpperCamelCase ) )
self.assertEqual(out_len + 2 , len(__UpperCamelCase ) )
_UpperCAmelCase = outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions
self.assertEqual(len(__UpperCamelCase ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, encoder_seq_length, dim] , )
@is_flaky()
def _snake_case ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
super().test_retain_grad_hidden_states_attentions()
def _UpperCamelCase ( _A="train-batch.pt" ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = hf_hub_download(repo_id="""hf-internal-testing/tourism-monthly-batch""" , filename=_A , repo_type="""dataset""" )
_UpperCAmelCase = torch.load(_A , map_location=_A )
return batch
@require_torch
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = AutoformerModel.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_batch()
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , future_values=batch["""future_values"""] , future_time_features=batch["""future_time_features"""] , )[0]
_UpperCAmelCase = torch.Size(
(64, model.config.prediction_length + model.config.label_length, model.config.feature_size) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[0.3_5_9_3, -1.3_3_9_8, 0.6_3_3_0], [0.2_2_7_9, 1.5_3_9_6, -0.1_7_9_2], [0.0_4_5_0, 1.3_2_2_5, -0.2_3_3_5]] , device=__UpperCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def _snake_case ( self : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_UpperCAmelCase = model(
past_values=batch["""past_values"""] , past_time_features=batch["""past_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , static_categorical_features=batch["""static_categorical_features"""] , ).encoder_last_hidden_state
_UpperCAmelCase = torch.Size((64, model.config.context_length, model.config.d_model) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[-0.0_7_3_4, -0.9_0_3_6, 0.8_3_5_8], [4.7_1_8_6, 2.4_1_1_3, 1.9_5_8_1], [1.7_9_5_3, 2.3_5_5_8, 1.2_9_7_0]] , device=__UpperCamelCase )
self.assertTrue(torch.allclose(output[0, :3, :3] , __UpperCamelCase , atol=__UpperCamelCase ) )
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase = AutoformerForPrediction.from_pretrained("""huggingface/autoformer-tourism-monthly""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_batch("""val-batch.pt""" )
with torch.no_grad():
_UpperCAmelCase = model.generate(
static_categorical_features=batch["""static_categorical_features"""] , past_time_features=batch["""past_time_features"""] , past_values=batch["""past_values"""] , future_time_features=batch["""future_time_features"""] , past_observed_mask=batch["""past_observed_mask"""] , )
_UpperCAmelCase = torch.Size((64, model.config.num_parallel_samples, model.config.prediction_length) )
self.assertEqual(outputs.sequences.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor([31_30.67_63, 40_56.52_93, 70_53.07_86] , device=__UpperCamelCase )
_UpperCAmelCase = outputs.sequences.mean(dim=1 )
        self.assertTrue(torch.allclose(mean_prediction[0, -3:] , __UpperCamelCase , rtol=1e-1 ) )
"""simple docstring"""
import argparse
import os
import re
import packaging.version
a : str = '''examples/'''
a : List[str] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a : Tuple = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
a : List[str] = '''README.md'''
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase ,_UpperCAmelCase = REPLACE_PATTERNS[pattern]
_UpperCAmelCase = replace.replace("""VERSION""" , _A )
_UpperCAmelCase = re_pattern.sub(_A , _A )
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_A )
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern="""examples""" )
def _UpperCamelCase ( _A , _A=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
_UpperCAmelCase = """🤗 Transformers currently provides the following architectures"""
_UpperCAmelCase = """1. Want to contribute a new model?"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.readlines()
# Find the start of the list.
_UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_UpperCAmelCase = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_A )
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase = REPLACE_PATTERNS["""init"""][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def _UpperCamelCase ( _A=False ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_UpperCAmelCase = default_version.base_version
elif patch:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_UpperCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = default_version
print(F"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
def _UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
_UpperCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
_UpperCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(_A )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
        post_release_work()
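# Minimal, self-contained sketch of the substitution that update_version_in_file
# performs for the "init" pattern above (the file content and version strings
# below are made up for illustration):
import re

_demo_content = '__version__ = "0.17.0.dev0"\n'
_demo_pattern = re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE)
_demo_replace = '__version__ = "VERSION"\n'.replace("VERSION", "0.17.0")
assert _demo_pattern.sub(_demo_replace, _demo_content) == '__version__ = "0.17.0"\n'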
"""simple docstring"""
from random import randint, random
def _UpperCamelCase ( _A , _A , _A , _A = False , _A = False , _A = 5 , ) -> list:
"""simple docstring"""
_UpperCAmelCase = [[-1] * number_of_cells] # Create a highway without any car
_UpperCAmelCase = 0
_UpperCAmelCase = max(_A , 0 )
while i < number_of_cells:
_UpperCAmelCase = (
randint(0 , _A ) if random_speed else initial_speed
) # Place the cars
i += (
randint(1 , max_speed * 2 ) if random_frequency else frequency
) # Arbitrary number, may need tuning
return highway
def _UpperCamelCase ( _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = highway_now[car_index + 1 :]
for cell in range(len(_A ) ): # May need a better name for this
if cells[cell] != -1: # If the cell is not empty then
return distance # we have the distance we wanted
distance += 1
    # Reaching here means no car ahead before the end of the highway: wrap around
return distance + get_distance(_A , -1 )
def _UpperCamelCase ( _A , _A , _A ) -> list:
"""simple docstring"""
_UpperCAmelCase = len(_A )
    # Before the calculation, the next highway state is empty
_UpperCAmelCase = [-1] * number_of_cells
for car_index in range(_A ):
if highway_now[car_index] != -1:
# Add 1 to the current speed of the car and cap the speed
_UpperCAmelCase = min(highway_now[car_index] + 1 , _A )
# Number of empty cell before the next car
_UpperCAmelCase = get_distance(_A , _A ) - 1
            # Cap the speed so the car cannot collide with the one ahead
_UpperCAmelCase = min(next_highway[car_index] , _A )
if random() < probability:
# Randomly, a driver will slow down
_UpperCAmelCase = max(next_highway[car_index] - 1 , 0 )
return next_highway
def _UpperCamelCase ( _A , _A , _A , _A ) -> list:
"""simple docstring"""
_UpperCAmelCase = len(highway[0] )
for i in range(_A ):
_UpperCAmelCase = update(highway[i] , _A , _A )
_UpperCAmelCase = [-1] * number_of_cells
for car_index in range(_A ):
_UpperCAmelCase = next_speeds_calculated[car_index]
if speed != -1:
# Change the position based on the speed (with % to create the loop)
_UpperCAmelCase = (car_index + speed) % number_of_cells
# Commit the change of position
_UpperCAmelCase = speed
highway.append(_A )
return highway
if __name__ == "__main__":
import doctest
    doctest.testmod()
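# A readable, de-obfuscated sketch of the Nagel-Schreckenberg step the functions
# above implement (names are mine; the standard rule caps speed at the gap):
from random import random as _rand


def nasch_update(highway_state: list[int], max_speed: int, p_slow: float) -> list[int]:
    number_of_cells = len(highway_state)
    next_state = [-1] * number_of_cells
    for i, speed in enumerate(highway_state):
        if speed == -1:  # empty cell, no car to update
            continue
        speed = min(speed + 1, max_speed)  # 1. accelerate
        gap = 0  # 2. count empty cells up to the next car (circular highway)
        while highway_state[(i + gap + 1) % number_of_cells] == -1:
            gap += 1
        speed = min(speed, gap)  # brake so the car cannot hit the one ahead
        if _rand() < p_slow:  # 3. random slowdown
            speed = max(speed - 1, 0)
        next_state[i] = speed  # 4. movement is applied in a separate pass
    return next_state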
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A ) -> None:
"""simple docstring"""
create_state_space_tree(_A , [] , 0 , [0 for i in range(len(_A ) )] )
def _UpperCamelCase ( _A , _A , _A , _A , ) -> None:
"""simple docstring"""
if index == len(_A ):
print(_A )
return
for i in range(len(_A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_UpperCAmelCase = True
create_state_space_tree(_A , _A , index + 1 , _A )
current_sequence.pop()
_UpperCAmelCase = False
a : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
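# Cross-check sketch: the same index-order backtracking with plain names agrees
# with itertools.permutations (my reference code, not part of the source file):
from itertools import permutations as _itertools_permutations


def all_permutations(seq: list) -> list[list]:
    result: list[list] = []

    def backtrack(current: list, used: list[bool]) -> None:
        if len(current) == len(seq):
            result.append(current.copy())
            return
        for i, item in enumerate(seq):
            if not used[i]:
                used[i] = True
                current.append(item)
                backtrack(current, used)
                current.pop()
                used[i] = False

    backtrack([], [False] * len(seq))
    return result


assert all_permutations([3, 1, 2, 4]) == [list(p) for p in _itertools_permutations([3, 1, 2, 4])]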
"""simple docstring"""
import os
import tempfile
import unittest
from transformers import FlaubertConfig, is_torch_available
from transformers.testing_utils import require_torch, require_torch_gpu, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
FlaubertForMultipleChoice,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertModel,
FlaubertWithLMHeadModel,
)
from transformers.models.flaubert.modeling_flaubert import FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ ( _UpperCAmelCase ):
def __init__( self : int , __UpperCamelCase : int , __UpperCamelCase : Any=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : Dict=True , __UpperCamelCase : List[str]=True , __UpperCamelCase : Tuple=True , __UpperCamelCase : Dict=False , __UpperCamelCase : List[str]=False , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : List[str]=0 , __UpperCamelCase : List[str]=32 , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : Dict=4 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Optional[Any]=5_12 , __UpperCamelCase : Optional[int]=12 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : str=0.0_2 , __UpperCamelCase : Any=3 , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple="last" , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Dict=None , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_lengths
_UpperCAmelCase = use_token_type_ids
_UpperCAmelCase = use_labels
_UpperCAmelCase = gelu_activation
_UpperCAmelCase = sinusoidal_embeddings
_UpperCAmelCase = causal
_UpperCAmelCase = asm
_UpperCAmelCase = n_langs
_UpperCAmelCase = vocab_size
_UpperCAmelCase = n_special
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = type_vocab_size
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = num_choices
_UpperCAmelCase = summary_type
_UpperCAmelCase = use_proj
_UpperCAmelCase = scope
def _snake_case ( self : Any ) ->str:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
_UpperCAmelCase = None
if self.use_input_lengths:
_UpperCAmelCase = (
ids_tensor([self.batch_size] , vocab_size=2 ) + self.seq_length - 2
) # small variation of seq_length
_UpperCAmelCase = None
if self.use_token_type_ids:
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.n_langs )
_UpperCAmelCase = None
_UpperCAmelCase = None
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
_UpperCAmelCase = ids_tensor([self.batch_size] , 2 ).float()
_UpperCAmelCase = ids_tensor([self.batch_size] , self.num_choices )
_UpperCAmelCase = self.get_config()
return (
config,
input_ids,
token_type_ids,
input_lengths,
sequence_labels,
token_labels,
is_impossible_labels,
choice_labels,
input_mask,
)
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return FlaubertConfig(
vocab_size=self.vocab_size , n_special=self.n_special , emb_dim=self.hidden_size , n_layers=self.num_hidden_layers , n_heads=self.num_attention_heads , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , gelu_activation=self.gelu_activation , sinusoidal_embeddings=self.sinusoidal_embeddings , asm=self.asm , causal=self.causal , n_langs=self.n_langs , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , summary_type=self.summary_type , use_proj=self.use_proj , )
def _snake_case ( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : int , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = FlaubertModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , lengths=__UpperCamelCase , langs=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , langs=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : int , __UpperCamelCase : Tuple , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , __UpperCamelCase : str , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = FlaubertWithLMHeadModel(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def _snake_case ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , ) ->Any:
'''simple docstring'''
_UpperCAmelCase = FlaubertForQuestionAnsweringSimple(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def _snake_case ( self : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , ) ->Any:
'''simple docstring'''
_UpperCAmelCase = FlaubertForQuestionAnswering(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(
__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , p_mask=__UpperCamelCase , )
_UpperCAmelCase = model(
__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase , cls_index=__UpperCamelCase , is_impossible=__UpperCamelCase , )
((_UpperCAmelCase) ,) = result_with_labels.to_tuple()
_UpperCAmelCase = model(__UpperCamelCase , start_positions=__UpperCamelCase , end_positions=__UpperCamelCase )
((_UpperCAmelCase) ,) = result_with_labels.to_tuple()
self.parent.assertEqual(result_with_labels.loss.shape , () )
self.parent.assertEqual(result.start_top_log_probs.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(result.start_top_index.shape , (self.batch_size, model.config.start_n_top) )
self.parent.assertEqual(
result.end_top_log_probs.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(
result.end_top_index.shape , (self.batch_size, model.config.start_n_top * model.config.end_n_top) )
self.parent.assertEqual(result.cls_logits.shape , (self.batch_size,) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[Any] , __UpperCamelCase : str , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = FlaubertForSequenceClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : str , __UpperCamelCase : Optional[int] , ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = FlaubertForTokenClassification(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , attention_mask=__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.num_choices
_UpperCAmelCase = FlaubertForMultipleChoice(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
_UpperCAmelCase = model(
__UpperCamelCase , attention_mask=__UpperCamelCase , token_type_ids=__UpperCamelCase , labels=__UpperCamelCase , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def _snake_case ( self : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
(
(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,(
_UpperCAmelCase
) ,
) = config_and_inputs
_UpperCAmelCase = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""lengths""": input_lengths,
"""attention_mask""": input_mask,
}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Dict = (
(
FlaubertModel,
FlaubertWithLMHeadModel,
FlaubertForQuestionAnswering,
FlaubertForQuestionAnsweringSimple,
FlaubertForSequenceClassification,
FlaubertForTokenClassification,
FlaubertForMultipleChoice,
)
if is_torch_available()
else ()
)
a : Tuple = (
{
'feature-extraction': FlaubertModel,
'fill-mask': FlaubertWithLMHeadModel,
'question-answering': FlaubertForQuestionAnsweringSimple,
'text-classification': FlaubertForSequenceClassification,
'token-classification': FlaubertForTokenClassification,
'zero-shot': FlaubertForSequenceClassification,
}
if is_torch_available()
else {}
)
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str ) ->Union[str, Any]:
'''simple docstring'''
if (
pipeline_test_casse_name == "QAPipelineTests"
and tokenizer_name is not None
and not tokenizer_name.endswith("""Fast""" )
):
# `QAPipelineTests` fails for a few models when the slower tokenizer are used.
# (The slower tokenizers were never used for pipeline tests before the pipeline testing rework)
# TODO: check (and possibly fix) the `QAPipelineTests` with slower tokenizer
return True
return False
def _snake_case ( self : Dict , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple=False ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = super()._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
if return_labels:
if model_class.__name__ == "FlaubertForQuestionAnswering":
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
_UpperCAmelCase = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=__UpperCamelCase )
return inputs_dict
def _snake_case ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FlaubertModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , emb_dim=37 )
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_model(*__UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_lm_head(*__UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_simple_qa(*__UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_qa(*__UpperCamelCase )
def _snake_case ( self : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_sequence_classif(*__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_token_classif(*__UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_flaubert_multiple_choice(*__UpperCamelCase )
@slow
def _snake_case ( self : Any ) ->List[Any]:
'''simple docstring'''
for model_name in FLAUBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = FlaubertModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
@require_torch_gpu
def _snake_case ( self : Tuple ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
# FlauBertForMultipleChoice behaves incorrectly in JIT environments.
if model_class == FlaubertForMultipleChoice:
return
_UpperCAmelCase = True
_UpperCAmelCase = model_class(config=__UpperCamelCase )
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = torch.jit.trace(
__UpperCamelCase , (inputs_dict["""input_ids"""].to("""cpu""" ), inputs_dict["""attention_mask"""].to("""cpu""" )) )
with tempfile.TemporaryDirectory() as tmp:
torch.jit.save(__UpperCamelCase , os.path.join(__UpperCamelCase , """traced_model.pt""" ) )
_UpperCAmelCase = torch.jit.load(os.path.join(__UpperCamelCase , """traced_model.pt""" ) , map_location=__UpperCamelCase )
loaded(inputs_dict["""input_ids"""].to(__UpperCamelCase ) , inputs_dict["""attention_mask"""].to(__UpperCamelCase ) )
@require_torch
class a_ ( unittest.TestCase ):
@slow
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = FlaubertModel.from_pretrained("""flaubert/flaubert_base_cased""" )
_UpperCAmelCase = torch.tensor([[0, 3_45, 2_32, 3_28, 7_40, 1_40, 16_95, 69, 60_78, 15_88, 2]] )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase )[0]
_UpperCAmelCase = torch.Size((1, 11, 7_68) )
self.assertEqual(output.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[-2.6_2_5_1, -1.4_2_9_8, -0.0_2_2_7], [-2.8_5_1_0, -1.6_3_8_7, 0.2_2_5_8], [-2.8_1_1_4, -1.1_8_3_2, -0.3_0_6_6]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , __UpperCamelCase , atol=1e-4 ) )
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Any=4 , __UpperCamelCase : Optional[int]=[0, 1, 2, 3] , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : int=[1, 3_84, 24, 24] , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=None , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = backbone_out_indices
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = scope
_UpperCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : int = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : str = False
a : List[str] = False
a : Dict = False
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = DPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = False
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
_UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = """add"""
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
_UpperCAmelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCamelCase , atol=1e-4 ) )
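# A common post-processing step paired with the depth test above: upsample the
# predicted depth back to the input resolution (sizes below are made up for
# illustration, not taken from the source):
import torch

predicted_depth = torch.rand(1, 384, 384)  # stand-in for outputs.predicted_depth
resized_depth = torch.nn.functional.interpolate(
    predicted_depth.unsqueeze(1), size=(480, 640), mode="bicubic", align_corners=False
).squeeze(1)
assert resized_depth.shape == torch.Size((1, 480, 640))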
"""simple docstring"""
import heapq
def _UpperCamelCase ( _A ) -> set[int]:
"""simple docstring"""
_UpperCAmelCase = []
# for each node and his adjacency list add them and the rank of the node to queue
# using heapq module the queue will be filled like a Priority Queue
# heapq works with a min priority queue, so I used -1*len(v) to build it
for key, value in graph.items():
# O(log(n))
heapq.heappush(_A , [-1 * len(_A ), (key, value)] )
# chosen_vertices = set of chosen vertices
_UpperCAmelCase = set()
# while queue isn't empty and there are still edges
# (queue[0][0] is the rank of the node with max rank)
while queue and queue[0][0] != 0:
# extract vertex with max rank from queue and add it to chosen_vertices
_UpperCAmelCase = heapq.heappop(_A )[1][0]
chosen_vertices.add(_A )
# Remove all arcs adjacent to argmax
for elem in queue:
# if v haven't adjacent node, skip
if elem[0] == 0:
continue
# if argmax is reachable from elem
# remove argmax from elem's adjacent list and update his rank
if argmax in elem[1][1]:
_UpperCAmelCase = elem[1][1].index(_A )
del elem[1][1][index]
elem[0] += 1
# re-order the queue
heapq.heapify(_A )
return chosen_vertices
if __name__ == "__main__":
import doctest
doctest.testmod()
a : str = {0: [1, 3], 1: [0, 3], 2: [0, 3, 4], 3: [0, 1, 2], 4: [2, 3]}
print(F"Minimum vertex cover:\n{greedy_min_vertex_cover(graph)}") | 709 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : List[str] = logging.get_logger(__name__)
class a_ ( enum.Enum ):
a : Optional[Any] = 0
a : Dict = 1
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'generated'
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) ->Any:
'''simple docstring'''
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Any , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
if truncation is not None:
_UpperCAmelCase = truncation
_UpperCAmelCase = generate_kwargs
_UpperCAmelCase = {}
if return_tensors is not None and return_type is None:
_UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCAmelCase = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
_UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
return True
def _snake_case ( self : Optional[Any] , *__UpperCamelCase : Any , __UpperCamelCase : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
_UpperCAmelCase = ([prefix + arg for arg in args[0]],)
_UpperCAmelCase = True
elif isinstance(args[0] , __UpperCamelCase ):
_UpperCAmelCase = (prefix + args[0],)
_UpperCAmelCase = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_UpperCAmelCase = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : str=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCamelCase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def _snake_case ( self : str , __UpperCamelCase : Dict , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
if self.framework == "pt":
_UpperCAmelCase ,_UpperCAmelCase = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
_UpperCAmelCase ,_UpperCAmelCase = tf.shape(model_inputs["""input_ids"""] ).numpy()
_UpperCAmelCase = generate_kwargs.get("""min_length""" , self.model.config.min_length )
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
_UpperCAmelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCAmelCase = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_UpperCAmelCase = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
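        # Shape note (illustrative): generate() returns (in_b * candidates, seq_len);
        # the reshapes above regroup the outputs as (in_b, candidates, seq_len) so
        # postprocessing can emit one list of candidates per input example.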
return {"output_ids": output_ids}
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=ReturnType.TEXT , __UpperCamelCase : int=False ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCAmelCase = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_UpperCAmelCase = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'summary'
def __call__( self : Optional[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : Optional[int] = 'translation'
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def _snake_case ( self : Tuple , *__UpperCamelCase : List[str] , __UpperCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=None ) ->Tuple:
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : int=None , __UpperCamelCase : int=None , **__UpperCamelCase : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
_UpperCAmelCase = src_lang
if tgt_lang is not None:
_UpperCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility; passing the languages as direct arguments is preferred.
_UpperCAmelCase = kwargs.get("""task""" , self.task )
_UpperCAmelCase = task.split("""_""" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
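                # e.g. a task string "translation_en_to_fr" splits into
                # ["translation", "en", "to", "fr"], so items[1] == "en" and
                # items[3] == "fr".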
_UpperCAmelCase = items[1]
_UpperCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->int:
'''simple docstring'''
        return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
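# Usage sketch (illustrative, not part of the original module; the model name
# and the exact output string are assumptions):
# from transformers import pipeline
# translator = pipeline("translation_en_to_de", model="t5-small")
# translator("How old are you?", max_length=40)
# -> [{'translation_text': 'Wie alt sind Sie?'}]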
"""simple docstring"""
import logging
import os
import threading
import time
try:
import warnings
except ImportError:
a : Optional[int] = None
try:
import msvcrt
except ImportError:
a : Optional[Any] = None
try:
import fcntl
except ImportError:
a : List[str] = None
# Backward compatibility
# ------------------------------------------------
try:
TimeoutError
except NameError:
a : Optional[Any] = OSError
# Data
# ------------------------------------------------
a : List[Any] = [
'''Timeout''',
'''BaseFileLock''',
'''WindowsFileLock''',
'''UnixFileLock''',
'''SoftFileLock''',
'''FileLock''',
]
a : int = '''3.0.12'''
a : Dict = None
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
global _logger
_UpperCAmelCase = _logger or logging.getLogger(__name__ )
return _logger
class a_ ( _UpperCAmelCase ):
def __init__( self : Any , __UpperCamelCase : Dict ) ->Tuple:
_UpperCAmelCase = lock_file
return None
def __str__( self : List[str] ) ->Tuple:
_UpperCAmelCase = f"""The file lock '{self.lock_file}' could not be acquired."""
return temp
class a_ :
def __init__( self : Tuple , __UpperCamelCase : Any ) ->Any:
_UpperCAmelCase = lock
return None
def __enter__( self : Any ) ->str:
return self.lock
def __exit__( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] ) ->List[str]:
self.lock.release()
return None
class a_ :
def __init__( self : Optional[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict=-1 , __UpperCamelCase : Union[str, Any]=None ) ->Optional[Any]:
_UpperCAmelCase = max_filename_length if max_filename_length is not None else 2_55
# Hash the filename if it's too long
_UpperCAmelCase = self.hash_filename_if_too_long(__UpperCamelCase , __UpperCamelCase )
# The path to the lock file.
_UpperCAmelCase = lock_file
        # The file descriptor for the *_lock_file* as it is returned by the
        # os.open() function; it is not None only while the object actually
        # holds the lock.
_UpperCAmelCase = None
# The default timeout value.
_UpperCAmelCase = timeout
# We use this lock primarily for the lock counter.
_UpperCAmelCase = threading.Lock()
        # The lock counter implements the nested locking mechanism: it is
        # incremented on every acquire, and the OS-level lock is only released
        # once the counter drops back to 0.
_UpperCAmelCase = 0
return None
@property
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
return self._lock_file
@property
def _snake_case ( self : Tuple ) ->Any:
return self._timeout
@timeout.setter
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Any ) ->int:
_UpperCAmelCase = float(__UpperCamelCase )
return None
def _snake_case ( self : Optional[int] ) ->List[Any]:
raise NotImplementedError()
def _snake_case ( self : str ) ->str:
raise NotImplementedError()
@property
def _snake_case ( self : Optional[int] ) ->int:
return self._lock_file_fd is not None
def _snake_case ( self : Any , __UpperCamelCase : List[Any]=None , __UpperCamelCase : int=0.0_5 ) ->Tuple:
if timeout is None:
_UpperCAmelCase = self.timeout
        # Increment the counter right at the beginning; we can still undo it
        # if something fails.
with self._thread_lock:
self._lock_counter += 1
_UpperCAmelCase = id(self )
_UpperCAmelCase = self._lock_file
_UpperCAmelCase = time.time()
try:
while True:
with self._thread_lock:
if not self.is_locked:
logger().debug(f"""Attempting to acquire lock {lock_id} on {lock_filename}""" )
self._acquire()
if self.is_locked:
logger().debug(f"""Lock {lock_id} acquired on {lock_filename}""" )
break
elif timeout >= 0 and time.time() - start_time > timeout:
logger().debug(f"""Timeout on acquiring lock {lock_id} on {lock_filename}""" )
raise Timeout(self._lock_file )
else:
logger().debug(
f"""Lock {lock_id} not acquired on {lock_filename}, waiting {poll_intervall} seconds ...""" )
time.sleep(__UpperCamelCase )
except: # noqa
# Something did go wrong, so decrement the counter.
with self._thread_lock:
_UpperCAmelCase = max(0 , self._lock_counter - 1 )
raise
return _Acquire_ReturnProxy(lock=self )
def _snake_case ( self : str , __UpperCamelCase : Tuple=False ) ->Dict:
with self._thread_lock:
if self.is_locked:
self._lock_counter -= 1
if self._lock_counter == 0 or force:
_UpperCAmelCase = id(self )
_UpperCAmelCase = self._lock_file
logger().debug(f"""Attempting to release lock {lock_id} on {lock_filename}""" )
self._release()
_UpperCAmelCase = 0
logger().debug(f"""Lock {lock_id} released on {lock_filename}""" )
return None
def __enter__( self : str ) ->Dict:
self.acquire()
return self
def __exit__( self : int , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] ) ->Optional[Any]:
self.release()
return None
def __del__( self : Any ) ->Tuple:
self.release(force=__UpperCamelCase )
return None
def _snake_case ( self : int , __UpperCamelCase : str , __UpperCamelCase : int ) ->str:
_UpperCAmelCase = os.path.basename(__UpperCamelCase )
if len(__UpperCamelCase ) > max_length and max_length > 0:
_UpperCAmelCase = os.path.dirname(__UpperCamelCase )
_UpperCAmelCase = str(hash(__UpperCamelCase ) )
_UpperCAmelCase = filename[: max_length - len(__UpperCamelCase ) - 8] + """...""" + hashed_filename + """.lock"""
return os.path.join(__UpperCamelCase , __UpperCamelCase )
else:
return path
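# Nested-acquire sketch (illustrative): because release() only unlocks once the
# counter managed above drops back to 0, the same lock object can be entered
# recursively without a second OS-level lock call.
# lock = FileLock("high_ground.txt.lock")
# with lock:        # counter 1, OS lock taken
#     with lock:    # counter 2, no extra OS call
#         pass
#     pass          # counter 1, still locked
# # counter 0, OS lock released here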
class a_ ( _UpperCAmelCase ):
def __init__( self : Optional[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple=-1 , __UpperCamelCase : Dict=None ) ->int:
from .file_utils import relative_to_absolute_path
super().__init__(__UpperCamelCase , timeout=__UpperCamelCase , max_filename_length=__UpperCamelCase )
_UpperCAmelCase = """\\\\?\\""" + relative_to_absolute_path(self.lock_file )
def _snake_case ( self : List[str] ) ->List[Any]:
_UpperCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
try:
_UpperCAmelCase = os.open(self._lock_file , __UpperCamelCase )
except OSError:
pass
else:
try:
msvcrt.locking(__UpperCamelCase , msvcrt.LK_NBLCK , 1 )
except OSError:
os.close(__UpperCamelCase )
else:
_UpperCAmelCase = fd
return None
def _snake_case ( self : Tuple ) ->Tuple:
_UpperCAmelCase = self._lock_file_fd
_UpperCAmelCase = None
msvcrt.locking(__UpperCamelCase , msvcrt.LK_UNLCK , 1 )
os.close(__UpperCamelCase )
try:
os.remove(self._lock_file )
# Probably another instance of the application
# that acquired the file lock.
except OSError:
pass
return None
class a_ ( _UpperCAmelCase ):
def __init__( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Tuple=-1 , __UpperCamelCase : Union[str, Any]=None ) ->Any:
_UpperCAmelCase = os.statvfs(os.path.dirname(__UpperCamelCase ) ).f_namemax
super().__init__(__UpperCamelCase , timeout=__UpperCamelCase , max_filename_length=__UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Tuple:
_UpperCAmelCase = os.O_RDWR | os.O_CREAT | os.O_TRUNC
_UpperCAmelCase = os.open(self._lock_file , __UpperCamelCase )
try:
fcntl.flock(__UpperCamelCase , fcntl.LOCK_EX | fcntl.LOCK_NB )
except OSError:
os.close(__UpperCamelCase )
else:
_UpperCAmelCase = fd
return None
def _snake_case ( self : List[str] ) ->Optional[Any]:
_UpperCAmelCase = self._lock_file_fd
_UpperCAmelCase = None
fcntl.flock(__UpperCamelCase , fcntl.LOCK_UN )
os.close(__UpperCamelCase )
return None
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
_UpperCAmelCase = os.O_WRONLY | os.O_CREAT | os.O_EXCL | os.O_TRUNC
try:
_UpperCAmelCase = os.open(self._lock_file , __UpperCamelCase )
except OSError:
pass
else:
_UpperCAmelCase = fd
return None
def _snake_case ( self : str ) ->List[str]:
os.close(self._lock_file_fd )
_UpperCAmelCase = None
try:
os.remove(self._lock_file )
# The file is already deleted and that's what we want.
except OSError:
pass
return None
a : str = None
if msvcrt:
a : Optional[int] = WindowsFileLock
elif fcntl:
a : Tuple = UnixFileLock
else:
a : Any = SoftFileLock
if warnings is not None:
    warnings.warn('''only soft file lock is available''')
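# Usage sketch (illustrative): FileLock resolves to the platform-specific class
# chosen above, and acquire() raises Timeout when the lock cannot be obtained
# within the given number of seconds.
# lock = FileLock("data.txt.lock", timeout=1)
# try:
#     with lock:
#         pass  # exclusive access to data.txt
# except Timeout:
#     print("another process is holding data.txt.lock")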
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
_UpperCAmelCase = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__UpperCamelCase )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
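# Minimal usage sketch of the checkpoint exercised above (illustrative; only the
# model id and the output shape are taken from the assertions in this test):
# pipe = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
# image = pipe(num_inference_steps=5, output_type="numpy").images[0]  # (256, 256, 3)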
"""simple docstring"""
import numpy as np
from transformers import BatchFeature
from transformers.testing_utils import require_tf, require_torch
from .test_feature_extraction_common import FeatureExtractionSavingTestMixin
class a_ ( _UpperCAmelCase ):
    # to be overwritten in feature-extractor-specific tests
a : Any = None
a : str = None
@property
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
return self.feat_extract_tester.prepare_feat_extract_dict()
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
self.assertTrue(hasattr(__UpperCamelCase , """feature_size""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """sampling_rate""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """padding_value""" ) )
def _snake_case ( self : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
self.assertTrue(all(len(__UpperCamelCase ) == len(__UpperCamelCase ) for x, y in zip(__UpperCamelCase , processed_features[input_name] ) ) )
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCamelCase )
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""np""" )
_UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_torch
def _snake_case ( self : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""pt""" )
_UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
@require_tf
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(equal_length=__UpperCamelCase )
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} , tensor_type="""tf""" )
_UpperCAmelCase = processed_features[input_name]
if len(batch_features_input.shape ) < 3:
_UpperCAmelCase = batch_features_input[:, :, None]
self.assertTrue(
batch_features_input.shape
== (self.feat_extract_tester.batch_size, len(speech_inputs[0] ), self.feat_extract_tester.feature_size) )
def _snake_case ( self : Tuple , __UpperCamelCase : List[str]=False ) ->Union[str, Any]:
'''simple docstring'''
def _inputs_have_equal_length(__UpperCamelCase : Any ):
_UpperCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(__UpperCamelCase ) != length:
return False
return True
def _inputs_are_equal(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : int ):
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
return False
for input_slice_a, input_slice_a in zip(__UpperCamelCase , __UpperCamelCase ):
if not np.allclose(np.asarray(__UpperCamelCase ) , np.asarray(__UpperCamelCase ) , atol=1e-3 ):
return False
return True
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=__UpperCamelCase )
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
_UpperCAmelCase = self.feat_extract_tester.seq_length_diff
_UpperCAmelCase = self.feat_extract_tester.max_seq_length + pad_diff
_UpperCAmelCase = self.feat_extract_tester.min_seq_length
_UpperCAmelCase = self.feat_extract_tester.batch_size
_UpperCAmelCase = self.feat_extract_tester.feature_size
# test padding for List[int] + numpy
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding=__UpperCamelCase )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""longest""" )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[-1] ) )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""np""" )
_UpperCAmelCase = input_a[input_name]
# max_length parameter has to be provided when setting `padding="max_length"`
with self.assertRaises(__UpperCamelCase ):
feat_extract.pad(__UpperCamelCase , padding="""max_length""" )[input_name]
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , return_tensors="""np""" )
_UpperCAmelCase = input_a[input_name]
self.assertFalse(_inputs_have_equal_length(__UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(__UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(__UpperCamelCase ) )
self.assertTrue(_inputs_are_equal(__UpperCamelCase , __UpperCamelCase ) )
self.assertTrue(len(input_a[0] ) == pad_min_length )
self.assertTrue(len(input_a[1] ) == pad_min_length + pad_diff )
self.assertTrue(input_a.shape[:2] == (batch_size, len(input_a[0] )) )
self.assertTrue(input_a.shape[:2] == (batch_size, pad_max_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == input_a.shape[2] == feature_size )
# test padding for `pad_to_multiple_of` for List[int] + numpy
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , pad_to_multiple_of=10 )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""longest""" , pad_to_multiple_of=10 )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=__UpperCamelCase )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , pad_to_multiple_of=10 , max_length=__UpperCamelCase , return_tensors="""np""" , )
_UpperCAmelCase = input_a[input_name]
self.assertTrue(all(len(__UpperCamelCase ) % 10 == 0 for x in input_a ) )
self.assertTrue(_inputs_are_equal(__UpperCamelCase , __UpperCamelCase ) )
_UpperCAmelCase = pad_max_length if pad_max_length % 10 == 0 else (pad_max_length // 10 + 1) * 10
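        # e.g. pad_max_length = 97 -> (97 // 10 + 1) * 10 = 100, while a length
        # already divisible by 10 is kept unchanged.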
self.assertTrue(all(len(__UpperCamelCase ) == expected_mult_pad_length for x in input_a ) )
self.assertEqual(input_a.shape[:2] , (batch_size, expected_mult_pad_length) )
if feature_size > 1:
self.assertTrue(input_a.shape[2] == feature_size )
# Check padding value is correct
_UpperCAmelCase = (np.ones(self.feat_extract_tester.feature_size ) * feat_extract.padding_value).sum()
self.assertTrue(
abs(np.asarray(input_a[0] )[pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[1] )[pad_min_length + pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - pad_diff) )
< 1e-3 )
self.assertTrue(
abs(
np.asarray(input_a[2] )[pad_min_length + 2 * pad_diff :].sum()
- padding_vector_sum * (pad_max_length - pad_min_length - 2 * pad_diff) )
< 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (pad_max_length - pad_min_length) ) < 1e-3 )
self.assertTrue(
abs(input_a[0, pad_min_length:].sum() - padding_vector_sum * (expected_mult_pad_length - pad_min_length) )
< 1e-3 )
def _snake_case ( self : Dict , __UpperCamelCase : str=False ) ->List[Any]:
'''simple docstring'''
def _inputs_have_equal_length(__UpperCamelCase : Union[str, Any] ):
_UpperCAmelCase = len(input[0] )
for input_slice in input[1:]:
if len(__UpperCamelCase ) != length:
return False
return True
def _inputs_are_equal(__UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] ):
if len(__UpperCamelCase ) != len(__UpperCamelCase ):
return False
for input_slice_a, input_slice_a in zip(__UpperCamelCase , __UpperCamelCase ):
if not np.allclose(np.asarray(__UpperCamelCase ) , np.asarray(__UpperCamelCase ) , atol=1e-3 ):
return False
return True
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common(numpify=__UpperCamelCase )
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
# truncate to smallest
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , truncation=__UpperCamelCase )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) )
_UpperCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__UpperCamelCase ) )
self.assertFalse(_inputs_have_equal_length(__UpperCamelCase ) )
# truncate to smallest with np
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" , truncation=__UpperCamelCase , )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , return_tensors="""np""" )
_UpperCAmelCase = input_a[input_name]
self.assertTrue(_inputs_have_equal_length(__UpperCamelCase ) )
self.assertTrue(input_a.shape[1] == len(speech_inputs[0] ) )
        # since truncation forces the padded length below that of the longest input,
        # the function cannot return a single `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(__UpperCamelCase ) )
# truncate to middle
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=__UpperCamelCase , return_tensors="""np""" , )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , truncation=__UpperCamelCase )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[1] ) , return_tensors="""np""" )
_UpperCAmelCase = input_a[input_name]
self.assertTrue(input_a.shape[1] == len(speech_inputs[1] ) )
self.assertTrue(_inputs_have_equal_length(__UpperCamelCase ) )
self.assertTrue(_inputs_have_equal_length(__UpperCamelCase ) )
self.assertTrue(_inputs_are_equal(__UpperCamelCase , __UpperCamelCase ) )
        # since truncation forces the padded length below that of the longest input,
        # the function cannot return a single `np.ndarray` and has to return a list
self.assertFalse(_inputs_have_equal_length(__UpperCamelCase ) )
self.assertTrue(len(input_a[-1] ) == len(speech_inputs[-1] ) )
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCamelCase ):
feat_extract.pad(__UpperCamelCase , truncation=__UpperCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCamelCase ):
feat_extract.pad(__UpperCamelCase , padding="""longest""" , truncation=__UpperCamelCase )[input_name]
# padding has to be max_length when setting `truncation=True`
with self.assertRaises(__UpperCamelCase ):
feat_extract.pad(__UpperCamelCase , padding="""longest""" , truncation=__UpperCamelCase )[input_name]
# max_length parameter has to be provided when setting `truncation=True` and padding="max_length"
with self.assertRaises(__UpperCamelCase ):
feat_extract.pad(__UpperCamelCase , padding="""max_length""" , truncation=__UpperCamelCase )[input_name]
# test truncation for `pad_to_multiple_of` for List[int] + numpy
_UpperCAmelCase = 12
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__UpperCamelCase , truncation=__UpperCamelCase , )
_UpperCAmelCase = input_a[input_name]
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=len(speech_inputs[0] ) , pad_to_multiple_of=__UpperCamelCase , )
_UpperCAmelCase = input_a[input_name]
# retrieve expected_length as multiple of pad_to_multiple_of
_UpperCAmelCase = len(speech_inputs[0] )
if expected_length % pad_to_multiple_of != 0:
_UpperCAmelCase = ((len(speech_inputs[0] ) // pad_to_multiple_of) + 1) * pad_to_multiple_of
self.assertTrue(len(input_a[0] ) == expected_length )
self.assertTrue(_inputs_have_equal_length(__UpperCamelCase ) )
self.assertFalse(_inputs_have_equal_length(__UpperCamelCase ) )
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
self._check_padding(numpify=__UpperCamelCase )
def _snake_case ( self : Dict ) ->Dict:
'''simple docstring'''
self._check_padding(numpify=__UpperCamelCase )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
self._check_truncation(numpify=__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
self._check_truncation(numpify=__UpperCamelCase )
@require_torch
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""pt""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_pt.numpy().astype(np.floataa ).sum() ) < 1e-2 )
@require_tf
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.feature_extraction_class(**self.feat_extract_dict )
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""np""" )[input_name]
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""tf""" )[input_name]
self.assertTrue(abs(input_np.astype(np.floataa ).sum() - input_tf.numpy().astype(np.floataa ).sum() ) < 1e-2 )
def _snake_case ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.feat_extract_dict
_UpperCAmelCase = True
_UpperCAmelCase = self.feature_extraction_class(**__UpperCamelCase )
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCAmelCase = [len(__UpperCamelCase ) for x in speech_inputs]
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
_UpperCAmelCase = feat_extract.pad(__UpperCamelCase , padding="""longest""" , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCamelCase )
self.assertListEqual(list(processed.attention_mask.shape ) , list(processed[input_name].shape[:2] ) )
self.assertListEqual(processed.attention_mask.sum(-1 ).tolist() , __UpperCamelCase )
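        # e.g. input lengths [8, 6] padded to 8 yield attention_mask rows
        # [1]*8 and [1]*6 + [0]*2, whose per-row sums recover [8, 6].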
def _snake_case ( self : Any ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.feat_extract_dict
_UpperCAmelCase = True
_UpperCAmelCase = self.feature_extraction_class(**__UpperCamelCase )
_UpperCAmelCase = self.feat_extract_tester.prepare_inputs_for_common()
_UpperCAmelCase = [len(__UpperCamelCase ) for x in speech_inputs]
_UpperCAmelCase = feat_extract.model_input_names[0]
_UpperCAmelCase = BatchFeature({input_name: speech_inputs} )
_UpperCAmelCase = min(__UpperCamelCase )
_UpperCAmelCase = feat_extract.pad(
__UpperCamelCase , padding="""max_length""" , max_length=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors="""np""" )
self.assertIn("""attention_mask""" , __UpperCamelCase )
self.assertListEqual(
list(processed_pad.attention_mask.shape ) , [processed_pad[input_name].shape[0], max_length] )
self.assertListEqual(
            processed_pad.attention_mask[:, :max_length].sum(-1 ).tolist() , [max_length for x in speech_inputs] )
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a : str = True
except (ImportError, ModuleNotFoundError):
a : List[str] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
re.sub("""<n>""" , """""" , _A ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_A ) ) | 19 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : Tuple = logging.get_logger(__name__)
a : Optional[Any] = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class a_ ( _UpperCAmelCase ):
'''simple docstring'''
a : int = 'wavlm'
def __init__( self : List[str] , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Dict=7_68 , __UpperCamelCase : List[Any]=12 , __UpperCamelCase : List[str]=12 , __UpperCamelCase : str=30_72 , __UpperCamelCase : int="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : List[Any]=0.0_2 , __UpperCamelCase : Optional[Any]=1e-5 , __UpperCamelCase : Optional[Any]="group" , __UpperCamelCase : Union[str, Any]="gelu" , __UpperCamelCase : str=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __UpperCamelCase : Union[str, Any]=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase : Union[str, Any]=(10, 3, 3, 3, 3, 2, 2) , __UpperCamelCase : str=False , __UpperCamelCase : List[Any]=1_28 , __UpperCamelCase : Union[str, Any]=16 , __UpperCamelCase : Optional[int]=3_20 , __UpperCamelCase : Any=8_00 , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Optional[int]=0.0_5 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : int=2 , __UpperCamelCase : str=0.0 , __UpperCamelCase : Optional[int]=10 , __UpperCamelCase : Any=3_20 , __UpperCamelCase : Any=2 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : List[str]=1_00 , __UpperCamelCase : int=2_56 , __UpperCamelCase : Dict=2_56 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Optional[int]="mean" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : str=False , __UpperCamelCase : List[str]=2_56 , __UpperCamelCase : Tuple=(5_12, 5_12, 5_12, 5_12, 15_00) , __UpperCamelCase : Union[str, Any]=(5, 3, 3, 1, 1) , __UpperCamelCase : Any=(1, 2, 3, 1, 1) , __UpperCamelCase : int=5_12 , __UpperCamelCase : List[Any]=80 , __UpperCamelCase : Optional[int]=0 , __UpperCamelCase : Dict=1 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=3 , __UpperCamelCase : str=2 , __UpperCamelCase : Dict=3 , __UpperCamelCase : int=None , **__UpperCamelCase : Optional[Any] , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_buckets
_UpperCAmelCase = max_bucket_distance
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim )
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layerdrop
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_ctc_classes
_UpperCAmelCase = vocab_size
_UpperCAmelCase = do_stable_layer_norm
_UpperCAmelCase = use_weighted_layer_sum
_UpperCAmelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase = num_codevectors_per_group
_UpperCAmelCase = num_codevector_groups
_UpperCAmelCase = contrastive_logits_temperature
_UpperCAmelCase = num_negatives
_UpperCAmelCase = codevector_dim
_UpperCAmelCase = proj_codevector_dim
_UpperCAmelCase = diversity_loss_weight
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# adapter
_UpperCAmelCase = add_adapter
_UpperCAmelCase = adapter_kernel_size
_UpperCAmelCase = adapter_stride
_UpperCAmelCase = num_adapter_layers
_UpperCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = xvector_output_dim
@property
def _snake_case ( self : str ) ->Tuple:
'''simple docstring'''
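        # With the default conv_stride of (5, 2, 2, 2, 2, 2, 2) this evaluates to
        # 5 * 2**6 = 320, i.e. one feature frame per 320 input samples.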
        return functools.reduce(operator.mul , self.conv_stride , 1 )
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a_ :
a : List[Any] = PegasusConfig
a : Dict = {}
a : List[Any] = 'gelu'
def __init__( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any=False , __UpperCamelCase : Any=99 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[Any]=20 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Tuple=0 , ) ->int:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _snake_case ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _UpperCamelCase ( _A , _A , _A , _A=None , _A=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(_A , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
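        # e.g. decoder_input_ids = [[2, 5, 1, 1]] with pad_token_id = 1 yields
        # [[1, 1, 0, 0]]: the first position is always attended, the rest
        # follow `token != pad`.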
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a : Any = True
a : int = False
a : Union[str, Any] = False
a : Optional[int] = False
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : List[Any] , __UpperCamelCase : str=None , **__UpperCamelCase : int ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=5_12 , padding=__UpperCamelCase )
_UpperCAmelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
        assert tgt_text == decoded
"""simple docstring"""
import string
from math import logaa
def _UpperCamelCase ( _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = document.translate(
str.maketrans("""""" , """""" , string.punctuation ) ).replace("""\n""" , """""" )
_UpperCAmelCase = document_without_punctuation.split(""" """ ) # word tokenization
return len([word for word in tokenize_document if word.lower() == term.lower()] )
def _UpperCamelCase ( _A , _A ) -> tuple[int, int]:
"""simple docstring"""
_UpperCAmelCase = corpus.lower().translate(
str.maketrans("""""" , """""" , string.punctuation ) ) # strip all punctuation and replace it with ''
_UpperCAmelCase = corpus_without_punctuation.split("""\n""" )
_UpperCAmelCase = term.lower()
return (len([doc for doc in docs if term in doc] ), len(_A ))
def _UpperCamelCase ( _A , _A , _A=False ) -> float:
"""simple docstring"""
if smoothing:
if n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(1 + logaa(n / (1 + df) ) , 3 )
if df == 0:
raise ZeroDivisionError("""df must be > 0""" )
elif n == 0:
raise ValueError("""log10(0) is undefined.""" )
return round(logaa(n / df ) , 3 )
def _UpperCamelCase ( _A , _A ) -> float:
"""simple docstring"""
    return round(tf * idf , 3 )
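# Worked example (illustrative): a term found in 1 of 10 documents has
# idf = log10(10 / 1) = 1.0, so with tf = 3 the functions above give a
# tf-idf score of round(3 * 1.0, 3) = 3.0.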
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : List[str]=7 , __UpperCamelCase : List[Any]=9 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : int=False , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : List[Any]=8 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=0.0_0_2 , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = encoder_seq_length
_UpperCAmelCase = decoder_seq_length
# For common tests
_UpperCAmelCase = self.decoder_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = d_ff
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = None
_UpperCAmelCase = decoder_layers
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : str=None , ) ->int:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
_UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_UpperCAmelCase ,_UpperCAmelCase = outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append the new token to the previous input_ids
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = model(__UpperCamelCase )["""last_hidden_state"""]
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_UpperCAmelCase = model(**__UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a : Any = True
a : Optional[int] = False
a : Any = False
a : Optional[int] = True
a : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a : int = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
    @unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(__UpperCamelCase , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_UpperCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_UpperCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids.to(__UpperCamelCase ) )
_UpperCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
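# --- Hedged illustration (not part of the original tests) ---
# The `past_key_values` check above asserts that decoding one new token with a
# cache reproduces the last position of a full forward pass. The toy "model"
# below (a causal cumulative sum, purely hypothetical) demonstrates the same
# pattern without loading a real checkpoint.
import torch

def toy_forward(x, past=None):
    # per-position prefix sums; `past` carries the running total as the cache
    prev = past if past is not None else torch.zeros(x.shape[0], 1)
    out = prev + torch.cumsum(x, dim=1)
    return out, out[:, -1:]  # (hidden states, new cache)

x = torch.randn(2, 5)
full, _ = toy_forward(x)                     # no cache: all 5 positions at once
_, cache = toy_forward(x[:, :4])             # prefix of 4 positions
step, _ = toy_forward(x[:, 4:], past=cache)  # one cached decoding step
assert torch.allclose(full[:, -1], step[:, -1], atol=1e-6)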
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = LxmertConfig.from_json_file(_A )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase = LxmertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_A , _A , _A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _A )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
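# --- Hedged usage note (not part of the original script) ---
# Example invocation with hypothetical paths; the flag names are exactly the
# ones declared by the argparse setup above, but the script filename and the
# checkpoint/config/output paths are placeholders:
#
#   python convert_lxmert_tf_checkpoint.py \
#       --tf_checkpoint_path /path/to/lxmert/model.ckpt \
#       --config_file /path/to/lxmert/config.json \
#       --pytorch_dump_path /path/to/output/pytorch_model.bin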
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCamelCase ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
_UpperCAmelCase ,_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __UpperCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(_A ) if isinstance(_A , pa.Buffer ) else pa.memory_map(_A )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=_A , features=_A ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_A )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_UpperCAmelCase = os.path.join(_A , """test.arrow""" )
with ArrowWriter(path=_A , schema=pa.schema(_A ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(_A , 1 )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if pa.types.is_list(_A ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
if isinstance(lst[0] , _A ):
change_first_primitive_element_in_list(lst[0] , _A )
else:
_UpperCAmelCase = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_A , format="""png""" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_A , features=Features({"""image""": Image()} ) , embed_local_files=_A ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _A )
with open(_A , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_A )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=_A ) as writer:
writer._build_writer(inferred_schema=_A )
    assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
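# --- Hedged usage sketch (not part of the original tests) ---
# The recurring pattern in the tests above: open an ArrowWriter over an
# in-memory buffer, write examples, finalize, then read the Arrow stream back
# with pyarrow. Condensed here using the same APIs imported at the top:
import pyarrow as pa
from datasets.arrow_writer import ArrowWriter

stream = pa.BufferOutputStream()
with ArrowWriter(stream=stream) as writer:
    writer.write({"col_1": "foo", "col_2": 1})
    writer.write({"col_1": "bar", "col_2": 2})
    num_examples, num_bytes = writer.finalize()

table = pa.ipc.open_stream(pa.BufferReader(stream.getvalue())).read_all()
assert num_examples == 2
assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}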
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : List[str]=7 , __UpperCamelCase : List[Any]=9 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : int=False , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : List[Any]=8 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=0.0_0_2 , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = encoder_seq_length
_UpperCAmelCase = decoder_seq_length
# For common tests
_UpperCAmelCase = self.decoder_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = d_ff
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = None
_UpperCAmelCase = decoder_layers
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : str=None , ) ->int:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
_UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in incorrect seq_lenth and which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_UpperCAmelCase ,_UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extent to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = model(__UpperCamelCase )["""last_hidden_state"""]
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_UpperCAmelCase = model(**__UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a : Any = True
a : Optional[int] = False
a : Any = False
a : Optional[int] = True
a : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a : int = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(__UpperCamelCase , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_UpperCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_UpperCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids.to(__UpperCamelCase ) )
_UpperCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase ) | 715 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : int ) ->Tuple:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(__UpperCamelCase , __UpperCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def _snake_case ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__UpperCamelCase ): device for device in jax.devices()}
def _snake_case ( self : Dict , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , __UpperCamelCase ) and column:
if all(
isinstance(__UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__UpperCamelCase , axis=0 )
return column
def _snake_case ( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , (str, bytes, type(__UpperCamelCase )) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
else:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(__UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__UpperCamelCase , """__array__""" ) and not isinstance(__UpperCamelCase , jax.Array ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : dict ) ->int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
_UpperCAmelCase = self._consolidate(__UpperCamelCase )
return column
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_batch(__UpperCamelCase )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
        return batch
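# --- Hedged usage sketch (not part of the original module) ---
# A formatter like the one above is normally reached through the public
# `Dataset.with_format` API rather than instantiated directly. A minimal
# sketch, assuming `datasets` and `jax` are both installed:
from datasets import Dataset

ds = Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
row = ds[0]    # {"x": jax.Array}; integer precision follows jax's x64 flag
col = ds["x"]  # equal-shape rows are stacked into a single jax.Array
assert col.shape == (2, 2)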
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
a : Union[str, Any] = False
class a_ ( unittest.TestCase ):
'''simple docstring'''
pass
@nightly
@require_torch_gpu
class a_ ( unittest.TestCase ):
'''simple docstring'''
def _snake_case ( self : List[str] ) ->Any:
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe.dual_guided(
prompt="""first prompt""" , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
with tempfile.TemporaryDirectory() as tmpdirname:
pipe.save_pretrained(__UpperCamelCase )
_UpperCAmelCase = VersatileDiffusionPipeline.from_pretrained(__UpperCamelCase , torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = generator.manual_seed(0 )
_UpperCAmelCase = pipe.dual_guided(
prompt="""first prompt""" , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=2 , output_type="""numpy""" , ).images
assert np.abs(image - new_image ).sum() < 1e-5, "Models don't have the same forward pass"
def _snake_case ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = VersatileDiffusionPipeline.from_pretrained("""shi-labs/versatile-diffusion""" , torch_dtype=torch.floataa )
pipe.to(__UpperCamelCase )
pipe.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = """cyberpunk 2077"""
_UpperCAmelCase = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg""" )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe.dual_guided(
prompt=__UpperCamelCase , image=__UpperCamelCase , text_to_image_strength=0.7_5 , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" , ).images
_UpperCAmelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase = np.array([0.1_4_4_8, 0.1_6_1_9, 0.1_7_4_1, 0.1_0_8_6, 0.1_1_4_7, 0.1_1_2_8, 0.1_1_9_9, 0.1_1_6_5, 0.1_0_0_1] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_UpperCAmelCase = """A painting of a squirrel eating a burger """
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = pipe.text_to_image(
prompt=__UpperCamelCase , generator=__UpperCamelCase , guidance_scale=7.5 , num_inference_steps=50 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase = np.array([0.3_3_6_7, 0.3_1_6_9, 0.2_6_5_6, 0.3_8_7_0, 0.4_7_9_0, 0.3_7_9_6, 0.4_0_0_9, 0.4_8_7_8, 0.4_7_7_8] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
_UpperCAmelCase = pipe.image_variation(__UpperCamelCase , generator=__UpperCamelCase , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, 2_53:2_56, 2_53:2_56, -1]
assert image.shape == (1, 5_12, 5_12, 3)
_UpperCAmelCase = np.array([0.3_0_7_6, 0.3_1_2_3, 0.3_2_8_4, 0.3_7_8_2, 0.3_7_7_0, 0.3_8_9_4, 0.4_2_9_7, 0.4_3_3_1, 0.4_4_5_6] )
        assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-1
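# --- Hedged illustration (not part of the original tests) ---
# The assertions above fingerprint a generated image by a small corner slice
# and compare it against hard-coded reference values with a loose tolerance.
# The same check reduced to plain numpy, with stand-in data:
import numpy as np

image = np.zeros((1, 512, 512, 3), dtype=np.float32)  # stand-in for pipe output
image_slice = image[0, 253:256, 253:256, -1]
expected_slice = np.zeros(9, dtype=np.float32)        # hypothetical reference
assert image.shape == (1, 512, 512, 3)
assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1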
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Tuple = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
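# --- Hedged illustration (not part of the original module) ---
# `_LazyModule` defers the heavy torch imports above until an attribute is
# first accessed. A stripped-down, self-contained sketch of that mechanism,
# demonstrated here with the stdlib `json` module instead of torch:
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    """Resolve attributes to their defining module only on first access."""

    def __init__(self, name, attr_to_module):
        super().__init__(name)
        self._attr_to_module = attr_to_module

    def __getattr__(self, attr):
        module = importlib.import_module(self._attr_to_module[attr])
        value = getattr(module, attr)
        setattr(self, attr, value)  # cache so __getattr__ fires once per name
        return value

lazy = LazyModuleSketch("lazy_demo", {"dumps": "json"})
assert lazy.dumps({"ok": True}) == '{"ok": true}'  # json is imported only here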
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Optional[int] = {
'''google/vivit-b-16x2-kinetics400''': (
'''https://huggingface.co/google/vivit-b-16x2-kinetics400/resolve/main/config.json'''
),
# See all Vivit models at https://huggingface.co/models?filter=vivit
}
class a_ ( _UpperCAmelCase ):
a : Dict = 'vivit'
def __init__( self : Tuple , __UpperCamelCase : Tuple=2_24 , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Tuple=[2, 16, 16] , __UpperCamelCase : Dict=3 , __UpperCamelCase : Tuple=7_68 , __UpperCamelCase : Optional[Any]=12 , __UpperCamelCase : Optional[Any]=12 , __UpperCamelCase : Dict=30_72 , __UpperCamelCase : Any="gelu_fast" , __UpperCamelCase : int=0.0 , __UpperCamelCase : Dict=0.0 , __UpperCamelCase : Optional[int]=0.0_2 , __UpperCamelCase : Optional[int]=1e-06 , __UpperCamelCase : Optional[int]=True , **__UpperCamelCase : Dict , ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = image_size
_UpperCAmelCase = num_frames
_UpperCAmelCase = tubelet_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = qkv_bias
super().__init__(**__UpperCamelCase )
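# Minimal usage sketch (assuming the class above is exposed as `VivitConfig`,
# matching the checkpoint mapping at the top of the file):
#
#   config = VivitConfig(image_size=2_24, num_frames=32, tubelet_size=[2, 16, 16])
#   assert config.hidden_size == 7_68   # the default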
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a : int = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = test_results.split(""" """ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCAmelCase = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_A ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
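# Worked example of the intended parsing (hedged: `handle_test_results` is the
# name this helper is invoked by at the bottom of the file; the input is a
# pytest summary line):
#
#   handle_test_results("= 1 failed, 2 passed in 32.1s =")
#   # -> (1, 2, "32.1s")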
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = None
_UpperCAmelCase = False
for line in failures_short_lines.split("""\n""" ):
if re.search(R"""_ \[doctest\]""" , _A ):
_UpperCAmelCase = True
_UpperCAmelCase = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_UpperCAmelCase = line
_UpperCAmelCase = False
return failures
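# Worked example of the intended parsing (`extract_first_line_failure` is the
# name used at the call site below; the input is a pytest `failures_short`
# report):
#
#   report = "_ [doctest] transformers.models.bert\n1 some context line\nValueError: bad value"
#   extract_first_line_failure(report)
#   # -> {"transformers.models.bert": "ValueError: bad value"}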
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = title
_UpperCAmelCase = doc_test_results["""time_spent"""].split(""",""" )[0]
_UpperCAmelCase = doc_test_results["""success"""]
_UpperCAmelCase = doc_test_results["""failures"""]
_UpperCAmelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCAmelCase = doc_test_results
@property
def _snake_case ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self._time_spent]
_UpperCAmelCase = 0
for time in time_spent:
_UpperCAmelCase = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = [0, 0, time_parts[0]]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"""{int(__UpperCamelCase )}h{int(__UpperCamelCase )}m{int(__UpperCamelCase )}s"""
@property
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = 40
_UpperCAmelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(__UpperCamelCase , __UpperCamelCase )}
_UpperCAmelCase = """"""
for category, failures in category_failures.items():
if len(__UpperCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__UpperCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCamelCase )
@staticmethod
def _snake_case ( ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(__UpperCamelCase )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=__UpperCamelCase , )
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_UpperCAmelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
_UpperCAmelCase = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=__UpperCamelCase , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase = """"""
for key, value in failures.items():
_UpperCAmelCase = value[:2_00] + """ [Truncated]""" if len(__UpperCamelCase ) > 2_50 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
_UpperCAmelCase = job_name
_UpperCAmelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_UpperCAmelCase = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_UpperCAmelCase = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
_UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda __UpperCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_UpperCAmelCase = f"""*Num failures* :{len(job_result["failed"] )} \n"""
_UpperCAmelCase = job_result["""failures"""]
_UpperCAmelCase = self.get_reply_blocks(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text=__UpperCamelCase )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f"""Results for {job}""" , blocks=__UpperCamelCase , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = os.environ["""GITHUB_RUN_ID"""]
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A ).json()
_UpperCAmelCase = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , _A )
return {}
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {}
if os.path.exists(_A ):
_UpperCAmelCase = os.listdir(_A )
for file in files:
try:
with open(os.path.join(_A , _A ) , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(_A , _A )}.""" ) from e
return _artifact
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
class a_ :
def __init__( self : List[Any] , __UpperCamelCase : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = name
_UpperCAmelCase = []
def __str__( self : int ) ->Optional[Any]:
'''simple docstring'''
return self.name
def _snake_case ( self : Dict , __UpperCamelCase : str ) ->int:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
_UpperCAmelCase = {}
_UpperCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_UpperCAmelCase = directory
if artifact_name not in _available_artifacts:
_UpperCAmelCase = Artifact(_A )
_available_artifacts[artifact_name].add_path(_A )
return _available_artifacts
if __name__ == "__main__":
a : Dict = get_job_links()
a : Dict = retrieve_available_artifacts()
a : Optional[int] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a : Dict = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a : int = github_actions_job_links.get('''run_doctests''')
a : Tuple = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
a : Optional[Any] = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
a , a , a : str = handle_test_results(artifact['''stats'''])
a : Tuple = failed
a : int = success
a : Any = time_spent[1:-1] + ''', '''
a : Dict = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
a : List[Any] = line.replace('''FAILED ''', '''''')
a : Tuple = line.split()[0].replace('''\n''', '''''')
if "::" in line:
a , a : Union[str, Any] = line.split('''::''')
else:
a , a : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a : Optional[Any] = all_failures[test] if test in all_failures else '''N/A'''
a : List[str] = failure
break
a : List[Any] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
import math
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
assert isinstance(_A , _A ) and (
number >= 0
), "'number' must been an int and positive"
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or not number % 2:
# Negatives, 0, 1 and all even numbers are not primes
return False
_UpperCAmelCase = range(3 , int(math.sqrt(_A ) + 1 ) , 2 )
return not any(not number % i for i in odd_numbers )
def _UpperCamelCase ( _A , _A=1 , **_A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = factor * value
_UpperCAmelCase = value
while not is_prime(_A ):
value += 1 if not ("desc" in kwargs and kwargs["desc"] is True) else -1
if value == first_value_val:
return next_prime(value + 1 , **_A )
return value
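# Quick sanity checks of the intended behavior (hedged; `is_prime` and
# `next_prime` are natural bindings for the two helpers above):
#
#   is_prime(29)    # -> True
#   is_prime(87)    # -> False (87 == 3 * 29)
#   next_prime(14)  # -> 17, the first prime >= 14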
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase ( _A , _A=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires soundfile>=0.12.0: 'pip install "soundfile>=0.12.0"'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def _UpperCamelCase ( *_A ) -> Dict:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("""test""" ):
for decorator in decorators:
_UpperCAmelCase = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
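# Usage sketch (illustrative; `for_all_test_methods`, `slow` and `local` are
# hypothetical bindings for the helpers above): apply several skip-decorators
# to every `test_*` method of a class in one shot.
#
#   @for_all_test_methods(slow, local)
#   class MyDatasetTests(unittest.TestCase):
#       def test_load(self):
#           ...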
class a_ ( _UpperCAmelCase ):
pass
class a_ ( _UpperCAmelCase ):
a : Any = 0
a : Optional[Any] = 1
a : int = 2
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(_A )
yield
finally:
os.chdir(_A )
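# Usage sketch (`tmp_working_dir` is a hypothetical binding for the context
# manager above): the process chdirs into a fresh temporary directory for the
# duration of the block and is restored to the original directory afterwards.
#
#   with tmp_working_dir():
#       open("scratch.txt", "w").close()   # lands in the temporary directory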
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(_A )
if not quiet:
print(_A , _A , file=_A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
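# Usage sketch (hedged: `execute_subprocess_async` is assumed to be the name of
# the wrapper above): run a command, stream its output live, and raise on a
# non-zero return code or a completely silent run.
#
#   result = execute_subprocess_async(["python", "-c", "print('ok')"])
#   assert result.stdout[0] == "ok"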
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , _A , 0 , re.M )
return int(_A )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta
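# Worked example (an explanatory note; function names are the assumed upstream
# ones): under pytest-xdist worker "gw3", the worker-id helper yields 3, so the
# function above hands out port 2_9_5_0_0 + 3 = 29503, keeping parallel workers
# off each other's distributed-torch ports.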
"""simple docstring"""
from collections import defaultdict
from graphs.minimum_spanning_tree_prims import prisms_algorithm as mst
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase ,_UpperCAmelCase = 9, 1_4 # noqa: F841
_UpperCAmelCase = [
[0, 1, 4],
[0, 7, 8],
[1, 2, 8],
[7, 8, 7],
[7, 6, 1],
[2, 8, 2],
[8, 6, 6],
[2, 3, 7],
[2, 5, 4],
[6, 5, 2],
[3, 5, 1_4],
[3, 4, 9],
[5, 4, 1_0],
[1, 7, 1_1],
]
_UpperCAmelCase = defaultdict(_A )
for nodea, nodea, cost in edges:
adjancency[nodea].append([nodea, cost] )
adjancency[nodea].append([nodea, cost] )
_UpperCAmelCase = mst(_A )
_UpperCAmelCase = [
[7, 6, 1],
[2, 8, 2],
[6, 5, 2],
[0, 1, 4],
[2, 5, 4],
[2, 3, 7],
[0, 7, 8],
[3, 4, 9],
]
for answer in expected:
_UpperCAmelCase = tuple(answer[:2] )
_UpperCAmelCase = tuple(edge[::-1] )
assert edge in result or reverse in result
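# Sanity note (illustrative): the expected tree spans 9 nodes with
# |V| - 1 = 8 edges and a total weight of 1 + 2 + 2 + 4 + 4 + 7 + 8 + 9 = 37,
# the classic CLRS example graph.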
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( _UpperCAmelCase ):
a : List[Any] = ''
a : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Tuple , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[str] = None , **__UpperCamelCase : Any , ) ->Any:
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def _snake_case ( self : List[str] ) ->List[str]:
'''simple docstring'''
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str = "rb" , **__UpperCamelCase : Any , ) ->List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
_UpperCAmelCase = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _snake_case ( self : int , __UpperCamelCase : int , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=False , **__UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip("""/""" ) )
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip("""/""" ) )
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out )
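# Usage sketch (a sketch, assuming `HfFileSystem` names the class above and
# `dataset_info` is a `DatasetInfo` fetched via `huggingface_hub`):
#
#   fs = HfFileSystem(repo_info=dataset_info)
#   fs.ls("")                               # top-level files and directories
#   with fs.open("data/train.csv") as f:    # hypothetical file path
#       header = f.readline()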
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
a : Any = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Tuple = ['''NllbTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = ['''NllbTokenizerFast''']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
a : List[Any] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Any = '''
Calculates how good predictions are, given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout: maximum time, in seconds, allowed for executing each candidate program (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : List[Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=[1, 10, 1_00] , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple=3.0 ) ->Union[str, Any]:
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase ,_UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
def estimator(_A , _A , _A ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_A , _A ):
_UpperCAmelCase = itertools.repeat(_A , len(_A ) )
else:
assert len(_A ) == len(_A )
_UpperCAmelCase = iter(_A )
return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] )
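# Worked example for the unbiased pass@k estimator above (the numbers match
# the docstring example earlier in the file): with n = 2 candidates of which
# c = 1 passes, pass@1 = 1 - (1 - 1/2) = 0.5 and pass@2 = 1.0.
#
#   estimate_pass_at_k(np.array([2]), np.array([1]), 1)   # -> array([0.5])
#   estimate_pass_at_k(np.array([2]), np.array([1]), 2)   # -> array([1.])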
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
if not isinstance(_A , _A ):
raise ValueError("""iterations must be defined as integers""" )
if not isinstance(_A , _A ) or not number >= 1:
raise ValueError(
"""starting number must be an integer and be more than 0""" )
if not iterations >= 1:
raise ValueError("""Iterations must be done more than 0 times to play FizzBuzz""" )
_UpperCAmelCase = """"""
while number <= iterations:
if number % 3 == 0:
out += "Fizz"
if number % 5 == 0:
out += "Buzz"
if 0 not in (number % 3, number % 5):
out += str(_A )
# print(out)
number += 1
out += " "
return out
if __name__ == "__main__":
import doctest
doctest.testmod()
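# Example output of the intended behavior (hedged; `fizz_buzz` is a natural
# binding for the function above):
#
#   fizz_buzz(1, 7)   # -> "1 2 Fizz 4 Buzz Fizz 7 "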
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod()
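# Worked example (a sketch; `euler_modified` is a hypothetical binding for the
# predictor-corrector above, assuming an argument order of
# (ode_func, y0, x0, x_end, step_size)): integrating y' = y with y(0) = 1 from
# 0 to 1 in steps of 0.1 should land near e ~= 2.718, noticeably closer than
# plain explicit Euler at the same step size.
#
#   y = euler_modified(lambda x, y: y, 1.0, 0.0, 1.0, 0.1)
#   y[-1]   # ~2.71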
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE ) -> int:
"""simple docstring"""
assert (
isinstance(SCREAMING_SNAKE_CASE , SCREAMING_SNAKE_CASE ) and number_of_steps > 0
), F'number_of_steps needs to be positive integer, your input {number_of_steps}'
if number_of_steps == 1:
return 1
UpperCamelCase__ , UpperCamelCase__ = 1, 1
for _ in range(number_of_steps - 1 ):
UpperCamelCase__ , UpperCamelCase__ = current + previous, current
return current
if __name__ == "__main__":
import doctest
doctest.testmod()
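# Worked example of the intended behavior (hedged; upstream this function is
# known as `climb_stairs`): with steps of size 1 or 2, the counts follow the
# Fibonacci pattern.
#
#   climb_stairs(3)   # -> 3   (1+1+1, 1+2, 2+1)
#   climb_stairs(4)   # -> 5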
"""simple docstring"""
def lowerCAmelCase_( SCREAMING_SNAKE_CASE = 50_00_00_00 ) -> int:
"""simple docstring"""
UpperCamelCase__ = set()
UpperCamelCase__ = int((limit - 24) ** (1 / 2) )
UpperCamelCase__ = set(range(3 , prime_square_limit + 1 , 2 ) )
primes.add(2 )
for p in range(3 , prime_square_limit + 1 , 2 ):
if p not in primes:
continue
primes.difference_update(set(range(p * p , prime_square_limit + 1 , SCREAMING_SNAKE_CASE ) ) )
for primea in primes:
UpperCamelCase__ = primea * primea
for primea in primes:
UpperCamelCase__ = primea * primea * primea
if square + cube >= limit - 16:
break
for primea in primes:
UpperCamelCase__ = primea * primea * primea * primea
UpperCamelCase__ = square + cube + tetr
if total >= limit:
break
ret.add(SCREAMING_SNAKE_CASE )
return len(SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
print(F"""{solution() = }""")