| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| stringlengths 81–54k | int64 0–721 | stringlengths 91–41.9k | int64 0–699 | int64 0–1 |
"""Karras et al. (2022) variance-expanding (VE) scheduler with a stochastic second-order (Heun) sampler."""
from dataclasses import dataclass
from typing import Optional, Tuple, Union

import numpy as np
import torch

from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput, randn_tensor
from .scheduling_utils import SchedulerMixin


@dataclass
class KarrasVeOutput(BaseOutput):
    """Output class for the scheduler's step function."""

    prev_sample: torch.FloatTensor
    derivative: torch.FloatTensor
    pred_original_sample: Optional[torch.FloatTensor] = None


class KarrasVeScheduler(SchedulerMixin, ConfigMixin):
    """Stochastic sampling from Karras et al. (2022)."""

    order = 2

    @register_to_config
    def __init__(
        self,
        sigma_min: float = 0.02,
        sigma_max: float = 100,
        s_noise: float = 1.007,
        s_churn: float = 80,
        s_min: float = 0.05,
        s_max: float = 50,
    ):
        # standard deviation of the initial noise distribution
        self.init_noise_sigma = sigma_max

        # setable values
        self.num_inference_steps = None
        self.timesteps = None
        self.schedule = None  # sigma(t_i)

    def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor:
        """Ensures interchangeability with schedulers that need to scale the denoising model input."""
        return sample

    def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None):
        """Sets the discrete timesteps and the sigma schedule used for the diffusion chain."""
        self.num_inference_steps = num_inference_steps
        timesteps = np.arange(0, self.num_inference_steps)[::-1].copy()
        self.timesteps = torch.from_numpy(timesteps).to(device)
        schedule = [
            (
                self.config.sigma_max**2
                * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1))
            )
            for i in self.timesteps
        ]
        self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device)

    def add_noise_to_input(
        self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None
    ) -> Tuple[torch.FloatTensor, float]:
        """Explicit Langevin-like "churn" step: raise the noise level from sigma to
        sigma_hat = sigma + gamma * sigma by adding fresh noise."""
        if self.config.s_min <= sigma <= self.config.s_max:
            gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1)
        else:
            gamma = 0

        # sample eps ~ N(0, S_noise^2 * I)
        eps = self.config.s_noise * randn_tensor(sample.shape, generator=generator).to(sample.device)
        sigma_hat = sigma + gamma * sigma
        sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps)

        return sample_hat, sigma_hat

    def step(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """First-order (Euler) step from sigma_hat to sigma_prev."""
        pred_original_sample = sample_hat + sigma_hat * model_output
        derivative = (sample_hat - pred_original_sample) / sigma_hat
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def step_correct(
        self,
        model_output: torch.FloatTensor,
        sigma_hat: float,
        sigma_prev: float,
        sample_hat: torch.FloatTensor,
        sample_prev: torch.FloatTensor,
        derivative: torch.FloatTensor,
        return_dict: bool = True,
    ) -> Union[KarrasVeOutput, Tuple]:
        """Second-order (Heun) correction based on the model output at sigma_prev."""
        pred_original_sample = sample_prev + sigma_prev * model_output
        derivative_corr = (sample_prev - pred_original_sample) / sigma_prev
        sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr)

        if not return_dict:
            return (sample_prev, derivative)

        return KarrasVeOutput(
            prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample
        )

    def add_noise(self, original_samples, noise, timesteps):
        raise NotImplementedError()
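
# ---------------------------------------------------------------------------
# Usage sketch (added for illustration, not part of the original module). It
# assumes a hypothetical denoiser `model(sample, sigma)` trained to predict
# noise, and shows how a pipeline would drive the scheduler above: churn,
# first-order Euler step, then the second-order Heun correction.
#
#     scheduler = KarrasVeScheduler()
#     scheduler.set_timesteps(num_inference_steps=50)
#     sample = torch.randn(1, 3, 64, 64) * scheduler.init_noise_sigma
#     for t in scheduler.timesteps:
#         sigma = scheduler.schedule[t]
#         sigma_prev = scheduler.schedule[t - 1] if t > 0 else 0
#         sample_hat, sigma_hat = scheduler.add_noise_to_input(sample, sigma)
#         model_output = model(sample_hat, sigma_hat)  # hypothetical denoiser call
#         output = scheduler.step(model_output, sigma_hat, sigma_prev, sample_hat)
#         if sigma_prev != 0:
#             model_output = model(output.prev_sample, sigma_prev)  # hypothetical
#             output = scheduler.step_correct(
#                 model_output, sigma_hat, sigma_prev, sample_hat,
#                 output.prev_sample, output.derivative,
#             )
#         sample = output.prev_sample
# ---------------------------------------------------------------------------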
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 19 | 0 |
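
# Usage sketch (illustrative): integrate dy/dx = y on [0, 1] with y(0) = 1;
# the final value should approximate e ~ 2.71828 for a small step size.
#
#     y = heun_method(lambda x, y: y, y0=1.0, x0=0.0, step_size=0.01, x_end=1.0)
#     print(y[-1])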
"""simple docstring"""
import argparse
import re
import requests
import torch
# git clone https://github.com/salesforce/BLIP.git
from models.blip import blip_decoder
from models.blip_itm import blip_itm
from models.blip_vqa import blip_vqa
from PIL import Image
from torchvision import transforms
from torchvision.transforms.functional import InterpolationMode
from transformers import (
BertTokenizer,
BlipConfig,
BlipForConditionalGeneration,
BlipForImageTextRetrieval,
BlipForQuestionAnswering,
)
def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg"""
_UpperCAmelCase = Image.open(requests.get(__SCREAMING_SNAKE_CASE , stream=__SCREAMING_SNAKE_CASE ).raw ).convert("""RGB""" )
_UpperCAmelCase = transforms.Compose(
[
transforms.Resize((image_size, image_size) , interpolation=InterpolationMode.BICUBIC ),
transforms.ToTensor(),
transforms.Normalize((0.48_145_466, 0.4_578_275, 0.40_821_073) , (0.26_862_954, 0.26_130_258, 0.27_577_711) ),
] )
_UpperCAmelCase = transform(__SCREAMING_SNAKE_CASE ).unsqueeze(0 ).to(__SCREAMING_SNAKE_CASE )
return image
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if "visual_encoder" in key:
_UpperCAmelCase = re.sub("""visual_encoder*""" , """vision_model.encoder""" , __SCREAMING_SNAKE_CASE )
if "blocks" in key:
_UpperCAmelCase = re.sub(R"""blocks""" , """layers""" , __SCREAMING_SNAKE_CASE )
if "attn" in key:
_UpperCAmelCase = re.sub(R"""attn""" , """self_attn""" , __SCREAMING_SNAKE_CASE )
if "norm1" in key:
_UpperCAmelCase = re.sub(R"""norm1""" , """layer_norm1""" , __SCREAMING_SNAKE_CASE )
if "norm2" in key:
_UpperCAmelCase = re.sub(R"""norm2""" , """layer_norm2""" , __SCREAMING_SNAKE_CASE )
if "encoder.norm" in key:
_UpperCAmelCase = re.sub(R"""encoder.norm""" , """post_layernorm""" , __SCREAMING_SNAKE_CASE )
if "encoder.patch_embed.proj" in key:
_UpperCAmelCase = re.sub(R"""encoder.patch_embed.proj""" , """embeddings.patch_embedding""" , __SCREAMING_SNAKE_CASE )
if "encoder.pos_embed" in key:
_UpperCAmelCase = re.sub(R"""encoder.pos_embed""" , """embeddings.position_embedding""" , __SCREAMING_SNAKE_CASE )
if "encoder.cls_token" in key:
_UpperCAmelCase = re.sub(R"""encoder.cls_token""" , """embeddings.class_embedding""" , __SCREAMING_SNAKE_CASE )
if "self_attn" in key:
_UpperCAmelCase = re.sub(R"""self_attn.proj""" , """self_attn.projection""" , __SCREAMING_SNAKE_CASE )
return key
@torch.no_grad()
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
if config_path is not None:
_UpperCAmelCase = BlipConfig.from_pretrained(__SCREAMING_SNAKE_CASE )
else:
_UpperCAmelCase = BlipConfig(projection_dim=5_1_2 , text_config={} , vision_config={} )
_UpperCAmelCase = BlipForConditionalGeneration(__SCREAMING_SNAKE_CASE ).eval()
_UpperCAmelCase = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_capfilt_large.pth"""
_UpperCAmelCase = blip_decoder(pretrained=__SCREAMING_SNAKE_CASE , image_size=3_8_4 , vit="""base""" )
_UpperCAmelCase = pt_model.eval()
_UpperCAmelCase = pt_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = rename_key(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = value
hf_model.load_state_dict(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = 3_8_4
_UpperCAmelCase = load_demo_image(image_size=__SCREAMING_SNAKE_CASE , device="""cpu""" )
_UpperCAmelCase = BertTokenizer.from_pretrained("""bert-base-uncased""" )
_UpperCAmelCase = tokenizer(["""a picture of"""] ).input_ids
_UpperCAmelCase = hf_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 3_8_6_1, 1_9_9_7, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
_UpperCAmelCase = hf_model.generate(__SCREAMING_SNAKE_CASE )
assert out[0].tolist() == [3_0_5_2_2, 1_0_3_7, 2_4_5_0, 3_5_6_4, 2_0_0_6, 1_9_9_6, 3_5_0_9, 2_0_0_7, 2_0_1_4, 3_8_9_9, 1_0_2]
if pytorch_dump_folder_path is not None:
hf_model.save_pretrained(__SCREAMING_SNAKE_CASE )
# model_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_vqa.pth'
_UpperCAmelCase = (
"""https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_vqa_capfilt_large.pth"""
)
_UpperCAmelCase = blip_vqa(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit="""base""" )
vqa_model.eval()
_UpperCAmelCase = vqa_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = rename_key(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForQuestionAnswering(__SCREAMING_SNAKE_CASE )
hf_vqa_model.load_state_dict(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ["""How many dogs are in this image?"""]
_UpperCAmelCase = tokenizer(__SCREAMING_SNAKE_CASE , return_tensors="""pt""" ).input_ids
_UpperCAmelCase = hf_vqa_model.generate(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE )
print(tokenizer.decode(answer[0] ) )
assert tokenizer.decode(answer[0] ) == "[UNK] 1 [SEP]"
if pytorch_dump_folder_path is not None:
hf_vqa_model.save_pretrained(pytorch_dump_folder_path + """_vqa""" )
_UpperCAmelCase = """https://storage.googleapis.com/sfr-vision-language-research/BLIP/models/model_base_retrieval_coco.pth"""
_UpperCAmelCase = blip_itm(pretrained=__SCREAMING_SNAKE_CASE , image_size=__SCREAMING_SNAKE_CASE , vit="""base""" )
itm_model.eval()
_UpperCAmelCase = itm_model.state_dict()
for key in modified_state_dict.copy():
_UpperCAmelCase = modified_state_dict.pop(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = rename_key(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = value
_UpperCAmelCase = BlipForImageTextRetrieval(__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = ["""A picture of a woman with a dog sitting in a beach"""]
_UpperCAmelCase = tokenizer(
__SCREAMING_SNAKE_CASE , return_tensors="""pt""" , padding="""max_length""" , truncation=__SCREAMING_SNAKE_CASE , max_length=3_5 , ).input_ids
hf_itm_model.load_state_dict(__SCREAMING_SNAKE_CASE )
hf_itm_model.eval()
_UpperCAmelCase = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
_UpperCAmelCase = hf_itm_model(__SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , use_itm_head=__SCREAMING_SNAKE_CASE )
assert out[0].item() == 0.2_110_687_494_277_954
assert torch.nn.functional.softmax(out_itm[0] , dim=1 )[:, 1].item() == 0.45_698_845_386_505_127
if pytorch_dump_folder_path is not None:
hf_itm_model.save_pretrained(pytorch_dump_folder_path + """_itm""" )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
a : Any = parser.parse_args()
convert_blip_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path) | 700 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
main() | 19 | 0 |
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : List[str] = logging.get_logger(__name__)
a : int = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class a_ ( _UpperCAmelCase ):
a : Optional[int] = 'wavlm'
def __init__( self : Optional[Any] , __UpperCamelCase : Dict=32 , __UpperCamelCase : int=7_68 , __UpperCamelCase : Dict=12 , __UpperCamelCase : Optional[int]=12 , __UpperCamelCase : Optional[Any]=30_72 , __UpperCamelCase : Optional[int]="gelu" , __UpperCamelCase : int=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : List[str]=0.0 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : int=1e-5 , __UpperCamelCase : int="group" , __UpperCamelCase : Any="gelu" , __UpperCamelCase : List[str]=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __UpperCamelCase : Dict=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase : Any=(10, 3, 3, 3, 3, 2, 2) , __UpperCamelCase : int=False , __UpperCamelCase : Optional[Any]=1_28 , __UpperCamelCase : Union[str, Any]=16 , __UpperCamelCase : Optional[int]=3_20 , __UpperCamelCase : str=8_00 , __UpperCamelCase : Tuple=False , __UpperCamelCase : Dict=True , __UpperCamelCase : int=0.0_5 , __UpperCamelCase : Any=10 , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=0.0 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : List[Any]=3_20 , __UpperCamelCase : int=2 , __UpperCamelCase : Union[str, Any]=0.1 , __UpperCamelCase : str=1_00 , __UpperCamelCase : Union[str, Any]=2_56 , __UpperCamelCase : Tuple=2_56 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Dict="mean" , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Optional[int]=False , __UpperCamelCase : Dict=2_56 , __UpperCamelCase : List[Any]=(5_12, 5_12, 5_12, 5_12, 15_00) , __UpperCamelCase : Optional[int]=(5, 3, 3, 1, 1) , __UpperCamelCase : Optional[Any]=(1, 2, 3, 1, 1) , __UpperCamelCase : int=5_12 , __UpperCamelCase : Optional[Any]=80 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : List[Any]=1 , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : str=False , __UpperCamelCase : Any=3 , __UpperCamelCase : str=2 , __UpperCamelCase : str=3 , __UpperCamelCase : List[Any]=None , **__UpperCamelCase : Tuple , ) ->List[str]:
'''simple docstring'''
super().__init__(**__UpperCamelCase , pad_token_id=__UpperCamelCase , bos_token_id=__UpperCamelCase , eos_token_id=__UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_buckets
_UpperCAmelCase = max_bucket_distance
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim )
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layerdrop
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_ctc_classes
_UpperCAmelCase = vocab_size
_UpperCAmelCase = do_stable_layer_norm
_UpperCAmelCase = use_weighted_layer_sum
_UpperCAmelCase = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase = num_codevectors_per_group
_UpperCAmelCase = num_codevector_groups
_UpperCAmelCase = contrastive_logits_temperature
_UpperCAmelCase = num_negatives
_UpperCAmelCase = codevector_dim
_UpperCAmelCase = proj_codevector_dim
_UpperCAmelCase = diversity_loss_weight
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# adapter
_UpperCAmelCase = add_adapter
_UpperCAmelCase = adapter_kernel_size
_UpperCAmelCase = adapter_stride
_UpperCAmelCase = num_adapter_layers
_UpperCAmelCase = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = list(__UpperCamelCase )
_UpperCAmelCase = xvector_output_dim
@property
def _snake_case ( self : List[str] ) ->Tuple:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 701 |
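
# Usage sketch (illustrative): with the default convolutional strides
# (5, 2, 2, 2, 2, 2, 2), each encoder frame covers 5 * 2**6 = 320 input samples.
#
#     config = WavLMConfig()
#     print(config.inputs_to_logits_ratio)  # 320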
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = requests.get(_A , headers=_A , allow_redirects=_A )
_UpperCAmelCase = result.headers["""Location"""]
_UpperCAmelCase = requests.get(_A , allow_redirects=_A )
_UpperCAmelCase = os.path.join(_A , F"""{artifact_name}.zip""" )
with open(_A , """wb""" ) as fp:
fp.write(response.content )
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
_UpperCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(""": """ )]
_UpperCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase = line[len("""FAILED """ ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
_UpperCAmelCase = line
if len(_A ) != len(_A ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` """
F"""and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
_UpperCAmelCase = None
if job_name and job_links:
_UpperCAmelCase = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_UpperCAmelCase = test.split("""/""" )[2]
else:
_UpperCAmelCase = None
return test
def _UpperCamelCase ( _A , _A=None ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase = [x for x in logs if x[2] is not None]
_UpperCAmelCase = {x[2] for x in logs}
_UpperCAmelCase = {}
for test in tests:
_UpperCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase = {"""count""": n_errors, """errors""": error_counts}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """| no. | error | status |"""
_UpperCAmelCase = """|-:|:-|:-|"""
_UpperCAmelCase = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase = reduced_by_error[error]["""count"""]
_UpperCAmelCase = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(_A )
return "\n".join(_A )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """| model | no. of errors | major error | count |"""
_UpperCAmelCase = """|-:|-:|-:|-:|"""
_UpperCAmelCase = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase = reduced_by_model[model]["""count"""]
_UpperCAmelCase ,_UpperCAmelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
_UpperCAmelCase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(_A )
return "\n".join(_A )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
a : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a : Tuple = get_job_links(args.workflow_run_id, token=args.token)
a : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a : List[Any] = k.find(''' / ''')
a : Tuple = k[index + len(''' / ''') :]
a : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a : Optional[int] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a : int = reduce_by_error(errors)
a : str = reduce_by_model(errors)
a : int = make_github_table(reduced_by_error)
a : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa) | 19 | 0 |
"""simple docstring"""
from collections import Counter
from timeit import timeit
def _UpperCamelCase ( _A = "" , ) -> Tuple:
return sum(c % 2 for c in Counter(input_str.replace(""" """ , """""" ).lower() ).values() ) < 2
def _UpperCamelCase ( _A = "" ) -> List[Any]:
if len(snake_case_ ) == 0:
return True
_UpperCAmelCase = input_str.replace(""" """ , """""" ).lower()
# character_freq_dict: Stores the frequency of every character in the input string
_UpperCAmelCase = {}
for character in lower_case_input_str:
_UpperCAmelCase = character_freq_dict.get(snake_case_ , 0 ) + 1
_UpperCAmelCase = 0
for character_count in character_freq_dict.values():
if character_count % 2:
odd_char += 1
if odd_char > 1:
return False
return True
def _UpperCamelCase ( _A = "" ) -> Dict:
print("""\nFor string = """ , snake_case_ , """:""" )
print(
"""> can_string_be_rearranged_as_palindrome_counter()""" , """\tans =""" , can_string_be_rearranged_as_palindrome_counter(snake_case_ ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome_counter(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
print(
"""> can_string_be_rearranged_as_palindrome()""" , """\tans =""" , can_string_be_rearranged_as_palindrome(snake_case_ ) , """\ttime =""" , timeit(
"""z.can_string_be_rearranged_as_palindrome(z.check_str)""" , setup="""import __main__ as z""" , ) , """seconds""" , )
if __name__ == "__main__":
a : Union[str, Any] = input(
'''Enter string to determine if it can be rearranged as a palindrome or not: '''
).strip()
benchmark(check_str)
a : Any = can_string_be_rearranged_as_palindrome_counter(check_str)
print(F"{check_str} can {'' if status else 'not '}be rearranged as a palindrome") | 702 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def _snake_case ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=None ) ->List[str]:
'''simple docstring'''
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(r"""<s_(.*?)>""" , __UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(rf"""</s_{key}>""" , __UpperCamelCase , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
_UpperCAmelCase = tokens.replace(__UpperCamelCase , """""" )
else:
_UpperCAmelCase = end_token.group()
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __UpperCamelCase , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_UpperCAmelCase = self.tokenajson(__UpperCamelCase , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if value:
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(r"""<sep/>""" ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCamelCase )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
_UpperCAmelCase = tokens[tokens.find(__UpperCamelCase ) + len(__UpperCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if len(__UpperCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
return self.image_processor | 19 | 0 |
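
# Usage sketch (illustrative): how `token2json` turns a Donut-style token
# sequence into nested JSON. The tag names below are made up for the example.
#
#     processor.token2json("<s_menu><s_name>Latte</s_name><s_price>$5</s_price></s_menu>")
#     # -> {"menu": {"name": "Latte", "price": "$5"}}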
"""simple docstring"""
import sys
from collections import defaultdict
class a_ :
def __init__( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = []
def _snake_case ( self : int , __UpperCamelCase : List[str] ) ->Dict:
'''simple docstring'''
return self.node_position[vertex]
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Any , __UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = pos
def _snake_case ( self : List[Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Dict , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] ) ->Optional[int]:
'''simple docstring'''
if start > size // 2 - 1:
return
else:
if 2 * start + 2 >= size:
_UpperCAmelCase = 2 * start + 1
else:
if heap[2 * start + 1] < heap[2 * start + 2]:
_UpperCAmelCase = 2 * start + 1
else:
_UpperCAmelCase = 2 * start + 2
if heap[smallest_child] < heap[start]:
_UpperCAmelCase = heap[smallest_child], positions[smallest_child]
_UpperCAmelCase = (
heap[start],
positions[start],
)
_UpperCAmelCase = temp, tempa
_UpperCAmelCase = self.get_position(positions[smallest_child] )
self.set_position(
positions[smallest_child] , self.get_position(positions[start] ) )
self.set_position(positions[start] , __UpperCamelCase )
self.top_to_bottom(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = position[index]
while index != 0:
_UpperCAmelCase = int((index - 2) / 2 ) if index % 2 == 0 else int((index - 1) / 2 )
if val < heap[parent]:
_UpperCAmelCase = heap[parent]
_UpperCAmelCase = position[parent]
self.set_position(position[parent] , __UpperCamelCase )
else:
_UpperCAmelCase = val
_UpperCAmelCase = temp
self.set_position(__UpperCamelCase , __UpperCamelCase )
break
_UpperCAmelCase = parent
else:
_UpperCAmelCase = val
_UpperCAmelCase = temp
self.set_position(__UpperCamelCase , 0 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = len(__UpperCamelCase ) // 2 - 1
for i in range(__UpperCamelCase , -1 , -1 ):
self.top_to_bottom(__UpperCamelCase , __UpperCamelCase , len(__UpperCamelCase ) , __UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = positions[0]
_UpperCAmelCase = sys.maxsize
self.top_to_bottom(__UpperCamelCase , 0 , len(__UpperCamelCase ) , __UpperCamelCase )
return temp
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Heap()
_UpperCAmelCase = [0] * len(_lowerCamelCase )
_UpperCAmelCase = [-1] * len(_lowerCamelCase ) # Neighboring Tree Vertex of selected vertex
# Minimum Distance of explored vertex with neighboring vertex of partial tree
# formed in graph
_UpperCAmelCase = [] # Heap of Distance of vertices from their neighboring vertex
_UpperCAmelCase = []
for vertex in range(len(_lowerCamelCase ) ):
distance_tv.append(sys.maxsize )
positions.append(_lowerCamelCase )
heap.node_position.append(_lowerCamelCase )
_UpperCAmelCase = []
_UpperCAmelCase = 1
_UpperCAmelCase = sys.maxsize
for neighbor, distance in adjacency_list[0]:
_UpperCAmelCase = 0
_UpperCAmelCase = distance
heap.heapify(_lowerCamelCase , _lowerCamelCase )
for _ in range(1 , len(_lowerCamelCase ) ):
_UpperCAmelCase = heap.delete_minimum(_lowerCamelCase , _lowerCamelCase )
if visited[vertex] == 0:
tree_edges.append((nbr_tv[vertex], vertex) )
_UpperCAmelCase = 1
for neighbor, distance in adjacency_list[vertex]:
if (
visited[neighbor] == 0
and distance < distance_tv[heap.get_position(_lowerCamelCase )]
):
_UpperCAmelCase = distance
heap.bottom_to_top(
_lowerCamelCase , heap.get_position(_lowerCamelCase ) , _lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase = vertex
return tree_edges
if __name__ == "__main__": # pragma: no cover
# < --------- Prims Algorithm --------- >
a : Union[str, Any] = int(input('''Enter number of edges: ''').strip())
a : Dict = defaultdict(list)
for _ in range(edges_number):
a : Any = [int(x) for x in input().strip().split()]
adjacency_list[edge[0]].append([edge[1], edge[2]])
adjacency_list[edge[1]].append([edge[0], edge[2]])
print(prisms_algorithm(adjacency_list)) | 703 |
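
# Usage sketch (illustrative): a small weighted graph as adjacency lists of
# [neighbor, weight] pairs; the returned edges form a minimum spanning tree.
#
#     graph = defaultdict(list, {
#         0: [[1, 1], [2, 4]],
#         1: [[0, 1], [2, 2], [3, 6]],
#         2: [[0, 4], [1, 2], [3, 3]],
#         3: [[1, 6], [2, 3]],
#     })
#     print(prisms_algorithm(graph))  # e.g. [(0, 1), (1, 2), (2, 3)]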
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( _A , _A , _A ) -> float:
"""simple docstring"""
_UpperCAmelCase = x
_UpperCAmelCase = y
for step in range(_A ): # noqa: B007
_UpperCAmelCase = a * a - b * b + x
_UpperCAmelCase = 2 * a * b + y
_UpperCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(_A , 1 , 1 ) )
def _UpperCamelCase ( _A = 8_0_0 , _A = 6_0_0 , _A = -0.6 , _A = 0 , _A = 3.2 , _A = 5_0 , _A = True , ) -> Image.Image:
"""simple docstring"""
_UpperCAmelCase = Image.new("""RGB""" , (image_width, image_height) )
_UpperCAmelCase = img.load()
# loop through the image-coordinates
for image_x in range(_A ):
for image_y in range(_A ):
# determine the figure-coordinates based on the image-coordinates
_UpperCAmelCase = figure_width / image_width * image_height
_UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
_UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
_UpperCAmelCase = get_distance(_A , _A , _A )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_UpperCAmelCase = get_color_coded_rgb(_A )
else:
_UpperCAmelCase = get_black_and_white_rgb(_A )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 19 | 0 |
"""simple docstring"""
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def _UpperCamelCase ( _A , _A , _A = "x" , _A = 1_0**-1_0 , _A = 1 , ) -> Any:
"""simple docstring"""
_UpperCAmelCase = symbols(_A )
_UpperCAmelCase = lambdify(_A , _A )
_UpperCAmelCase = lambdify(_A , diff(_A , _A ) )
_UpperCAmelCase = starting_point
while True:
if diff_function(_A ) != 0:
_UpperCAmelCase = prev_guess - multiplicity * func(_A ) / diff_function(
_A )
else:
raise ZeroDivisionError("""Could not find root""" ) from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess ) < precision:
return next_guess
_UpperCAmelCase = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(F"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")
# Find root of polynomial
# Find fourth Root of 5
print(F"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}")
# Find value of e
print(
'''The root of log(y) - 1 = 0 is ''',
F"{newton_raphson('log(y) - 1', 2, variable='y')}",
)
# Exponential Roots
print(
'''The root of exp(x) - 1 = 0 is''',
F"{newton_raphson('exp(x) - 1', 1_0, precision=0.0_05)}",
)
# Find root of cos(x)
print(F"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}") | 704 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
def __init__( self : List[str] , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 88 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 32 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "geglu" , __UpperCamelCase : Optional[int] = None , ) ->Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__UpperCamelCase , attention_head_dim=__UpperCamelCase , in_channels=__UpperCamelCase , num_layers=__UpperCamelCase , dropout=__UpperCamelCase , norm_num_groups=__UpperCamelCase , cross_attention_dim=__UpperCamelCase , attention_bias=__UpperCamelCase , sample_size=__UpperCamelCase , num_vector_embeds=__UpperCamelCase , activation_fn=__UpperCamelCase , num_embeds_ada_norm=__UpperCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def _snake_case ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : bool = True , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , timestep=__UpperCamelCase , cross_attention_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
if not return_dict:
return (output_states,)
return TransformeraDModelOutput(sample=__UpperCamelCase ) | 19 | 0 |
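
# Usage note (illustrative): `encoder_hidden_states` is the concatenation of the
# two conditions along the sequence axis, e.g. 77 text tokens followed by 257
# image-embedding tokens, giving shape (batch, 77 + 257, num_features);
# `mix_ratio` blends the two transformers' residuals at inference time.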
"""simple docstring"""
def _UpperCamelCase ( _A , _A ) -> Tuple:
"""simple docstring"""
def get_matched_characters(_A , _A ) -> str:
_UpperCAmelCase = []
_UpperCAmelCase = min(len(_stra ) , len(_stra ) ) // 2
for i, l in enumerate(_stra ):
_UpperCAmelCase = int(max(0 , i - limit ) )
_UpperCAmelCase = int(min(i + limit + 1 , len(_stra ) ) )
if l in _stra[left:right]:
matched.append(snake_case__ )
_UpperCAmelCase = F"""{_stra[0:_stra.index(snake_case__ )]} {_stra[_stra.index(snake_case__ ) + 1:]}"""
return "".join(snake_case__ )
# matching characters
_UpperCAmelCase = get_matched_characters(snake_case__ , snake_case__ )
_UpperCAmelCase = get_matched_characters(snake_case__ , snake_case__ )
_UpperCAmelCase = len(snake_case__ )
# transposition
_UpperCAmelCase = (
len([(ca, ca) for ca, ca in zip(snake_case__ , snake_case__ ) if ca != ca] ) // 2
)
if not match_count:
_UpperCAmelCase = 0.0
else:
_UpperCAmelCase = (
1
/ 3
* (
match_count / len(snake_case__ )
+ match_count / len(snake_case__ )
+ (match_count - transpositions) / match_count
)
)
# common prefix up to 4 characters
_UpperCAmelCase = 0
for ca, ca in zip(stra[:4] , stra[:4] ):
if ca == ca:
prefix_len += 1
else:
break
return jaro + 0.1 * prefix_len * (1 - jaro)
if __name__ == "__main__":
import doctest
doctest.testmod()
print(jaro_winkler('''hello''', '''world''')) | 705 |
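
# Usage sketch (illustrative):
#     jaro_winkler("martha", "marhta")  # ~0.9611
#     jaro_winkler("hello", "world")    # ~0.4667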
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = LxmertConfig.from_json_file(_A )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase = LxmertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_A , _A , _A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _A )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Union[str, Any] = parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path) | 19 | 0 |
def xnor_gate(input_1: int, input_2: int) -> int:
    """XNOR gate: returns 1 if both inputs are equal, 0 otherwise."""
    return 1 if input_1 == input_2 else 0


def test_xnor_gate() -> None:
    """Tests the xnor_gate function against its truth table."""
    assert xnor_gate(0, 0) == 1
    assert xnor_gate(0, 1) == 0
    assert xnor_gate(1, 0) == 0
    assert xnor_gate(1, 1) == 1


if __name__ == "__main__":
    print(xnor_gate(0, 0))
    print(xnor_gate(0, 1))
    print(xnor_gate(1, 0))
    print(xnor_gate(1, 1))
"""simple docstring"""
import argparse
import os
import re
import packaging.version
a : str = '''examples/'''
a : List[str] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a : Tuple = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
a : List[str] = '''README.md'''
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase ,_UpperCAmelCase = REPLACE_PATTERNS[pattern]
_UpperCAmelCase = replace.replace("""VERSION""" , _A )
_UpperCAmelCase = re_pattern.sub(_A , _A )
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_A )
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern="""examples""" )
def _UpperCamelCase ( _A , _A=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
_UpperCAmelCase = """🤗 Transformers currently provides the following architectures"""
_UpperCAmelCase = """1. Want to contribute a new model?"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.readlines()
# Find the start of the list.
_UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_UpperCAmelCase = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_A )
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase = REPLACE_PATTERNS["""init"""][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def _UpperCamelCase ( _A=False ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_UpperCAmelCase = default_version.base_version
elif patch:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_UpperCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = default_version
print(F"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
def _UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
_UpperCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
_UpperCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(_A )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work() | 19 | 0 |
"""simple docstring"""
import argparse
import shutil
from pathlib import Path
from tqdm import tqdm
from transformers import AutoTokenizer
def _UpperCamelCase ( _A , _A , _A , _A=1_0_2_4 ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = [], []
_UpperCAmelCase = list(zip(UpperCAmelCase__ , UpperCAmelCase__ ) )
_UpperCAmelCase = sorted_examples[0]
def is_too_big(_A ):
return tok(UpperCAmelCase__ , return_tensors="""pt""" ).input_ids.shape[1] > max_tokens
for src, tgt in tqdm(sorted_examples[1:] ):
_UpperCAmelCase = new_src + """ """ + src
_UpperCAmelCase = new_tgt + """ """ + tgt
if is_too_big(UpperCAmelCase__ ) or is_too_big(UpperCAmelCase__ ): # cant fit, finalize example
finished_src.append(UpperCAmelCase__ )
finished_tgt.append(UpperCAmelCase__ )
_UpperCAmelCase = src, tgt
else: # can fit, keep adding
_UpperCAmelCase = cand_src, cand_tgt
# cleanup
if new_src:
assert new_tgt
finished_src.append(UpperCAmelCase__ )
finished_tgt.append(UpperCAmelCase__ )
return finished_src, finished_tgt
def _UpperCamelCase ( _A , _A , _A , _A ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = Path(UpperCAmelCase__ )
save_path.mkdir(exist_ok=UpperCAmelCase__ )
for split in ["train"]:
_UpperCAmelCase = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
_UpperCAmelCase = [x.rstrip() for x in Path(UpperCAmelCase__ ).open().readlines()]
_UpperCAmelCase = [x.rstrip() for x in Path(UpperCAmelCase__ ).open().readlines()]
_UpperCAmelCase = pack_examples(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
print(F"""packed {split} split from {len(UpperCAmelCase__ )} examples -> {len(UpperCAmelCase__ )}.""" )
Path(save_path / F"""{split}.source""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase__ ) )
Path(save_path / F"""{split}.target""" ).open("""w""" ).write("""\n""".join(UpperCAmelCase__ ) )
for split in ["val", "test"]:
_UpperCAmelCase = data_dir / F"""{split}.source""", data_dir / F"""{split}.target"""
shutil.copyfile(UpperCAmelCase__ , save_path / F"""{split}.source""" )
shutil.copyfile(UpperCAmelCase__ , save_path / F"""{split}.target""" )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser()
parser.add_argument("""--tok_name""" , type=UpperCAmelCase__ , help="""like facebook/bart-large-cnn,t5-base, etc.""" )
parser.add_argument("""--max_seq_len""" , type=UpperCAmelCase__ , default=1_2_8 )
parser.add_argument("""--data_dir""" , type=UpperCAmelCase__ )
parser.add_argument("""--save_path""" , type=UpperCAmelCase__ )
_UpperCAmelCase = parser.parse_args()
_UpperCAmelCase = AutoTokenizer.from_pretrained(args.tok_name )
return pack_data_dir(UpperCAmelCase__ , Path(args.data_dir ) , args.max_seq_len , args.save_path )
if __name__ == "__main__":
packer_cli() | 707 |
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A ) -> None:
"""simple docstring"""
create_state_space_tree(_A , [] , 0 , [0 for i in range(len(_A ) )] )
def _UpperCamelCase ( _A , _A , _A , _A , ) -> None:
"""simple docstring"""
if index == len(_A ):
print(_A )
return
for i in range(len(_A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_UpperCAmelCase = True
create_state_space_tree(_A , _A , index + 1 , _A )
current_sequence.pop()
_UpperCAmelCase = False
a : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a) | 19 | 0 |
"""simple docstring"""
from __future__ import annotations
import unittest
import numpy as np
from transformers import BlipTextConfig
from transformers.testing_utils import require_tf, slow
from transformers.utils import is_tf_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
if is_tf_available():
import tensorflow as tf
from transformers import TFBlipTextModel
from transformers.models.blip.modeling_tf_blip import TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST
class a_ :
def __init__( self : str , __UpperCamelCase : str , __UpperCamelCase : List[str]=12 , __UpperCamelCase : Optional[Any]=7 , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : Optional[int]=99 , __UpperCamelCase : List[Any]=32 , __UpperCamelCase : List[Any]=32 , __UpperCamelCase : Optional[Any]=2 , __UpperCamelCase : int=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : str=0.1 , __UpperCamelCase : List[Any]=5_12 , __UpperCamelCase : Tuple=0.0_2 , __UpperCamelCase : Any=0 , __UpperCamelCase : List[Any]=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_input_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = projection_dim
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = initializer_range
_UpperCAmelCase = scope
_UpperCAmelCase = bos_token_id
def _snake_case ( self : Any ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = None
if self.use_input_mask:
_UpperCAmelCase = random_attention_mask([self.batch_size, self.seq_length] )
if input_mask is not None:
_UpperCAmelCase = input_mask.numpy()
_UpperCAmelCase = input_mask.shape
_UpperCAmelCase = np.random.randint(1 , seq_length - 1 , size=(batch_size,) )
for batch_idx, start_index in enumerate(lowerCAmelCase__ ):
_UpperCAmelCase = 1
_UpperCAmelCase = 0
_UpperCAmelCase = self.get_config()
return config, input_ids, tf.convert_to_tensor(lowerCAmelCase__ )
def _snake_case ( self : Tuple ) ->str:
'''simple docstring'''
return BlipTextConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , projection_dim=self.projection_dim , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , dropout=self.dropout , attention_dropout=self.attention_dropout , max_position_embeddings=self.max_position_embeddings , initializer_range=self.initializer_range , bos_token_id=self.bos_token_id , )
def _snake_case ( self : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = TFBlipTextModel(config=lowerCAmelCase__ )
_UpperCAmelCase = model(lowerCAmelCase__ , attention_mask=lowerCAmelCase__ , training=lowerCAmelCase__ )
_UpperCAmelCase = model(lowerCAmelCase__ , training=lowerCAmelCase__ )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"input_ids": input_ids, "attention_mask": input_mask}
return config, inputs_dict
@require_tf
class a_ ( a__ , unittest.TestCase ):
a : int = (TFBlipTextModel,) if is_tf_available() else ()
a : Any = False
a : Union[str, Any] = False
a : Any = False
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = BlipTextModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=lowerCAmelCase__ , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Optional[int]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*lowerCAmelCase__ )
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
pass
def _snake_case ( self : List[Any] ) ->Tuple:
'''simple docstring'''
pass
@unittest.skip(reason="""Blip does not use inputs_embeds""" )
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _snake_case ( self : str ) ->List[Any]:
'''simple docstring'''
pass
@unittest.skip(reason="""BlipTextModel has no base class and is not available in MODEL_MAPPING""" )
def _snake_case ( self : Any ) ->List[Any]:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
for model_name in TF_BLIP_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = TFBlipTextModel.from_pretrained(lowerCAmelCase__ )
self.assertIsNotNone(lowerCAmelCase__ )
def _snake_case ( self : Dict , __UpperCamelCase : Any=True ) ->Any:
'''simple docstring'''
super().test_pt_tf_model_equivalence(allow_missing_keys=lowerCAmelCase__ ) | 708 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Any=4 , __UpperCamelCase : Optional[int]=[0, 1, 2, 3] , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : int=[1, 3_84, 24, 24] , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=None , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = backbone_out_indices
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = scope
_UpperCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : int = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : str = False
a : List[str] = False
a : Dict = False
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = DPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = False
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
_UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = """add"""
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
_UpperCAmelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCamelCase , atol=1e-4 ) ) | 19 | 0 |
"""simple docstring"""
from numpy import exp, pi, sqrt
def _UpperCamelCase ( _A , _A = 0.0 , _A = 1.0 ) -> List[Any]:
"""simple docstring"""
return 1 / sqrt(2 * pi * sigma**2 ) * exp(-((x - mu) ** 2) / (2 * sigma**2) )
if __name__ == "__main__":
import doctest
doctest.testmod() | 709 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : List[str] = logging.get_logger(__name__)
class a_ ( enum.Enum ):
a : Optional[Any] = 0
a : Dict = 1
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'generated'
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) ->Any:
'''simple docstring'''
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Any , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
if truncation is not None:
_UpperCAmelCase = truncation
_UpperCAmelCase = generate_kwargs
_UpperCAmelCase = {}
if return_tensors is not None and return_type is None:
_UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCAmelCase = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
_UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
return True
def _snake_case ( self : Optional[Any] , *__UpperCamelCase : Any , __UpperCamelCase : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
_UpperCAmelCase = ([prefix + arg for arg in args[0]],)
_UpperCAmelCase = True
elif isinstance(args[0] , __UpperCamelCase ):
_UpperCAmelCase = (prefix + args[0],)
_UpperCAmelCase = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_UpperCAmelCase = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : str=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCamelCase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def _snake_case ( self : str , __UpperCamelCase : Dict , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
if self.framework == "pt":
_UpperCAmelCase ,_UpperCAmelCase = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
_UpperCAmelCase ,_UpperCAmelCase = tf.shape(model_inputs["""input_ids"""] ).numpy()
_UpperCAmelCase = generate_kwargs.get("""min_length""" , self.model.config.min_length )
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
_UpperCAmelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCAmelCase = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_UpperCAmelCase = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=ReturnType.TEXT , __UpperCamelCase : int=False ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCAmelCase = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_UpperCAmelCase = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'summary'
def __call__( self : Optional[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : Optional[int] = 'translation'
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def _snake_case ( self : Tuple , *__UpperCamelCase : List[str] , __UpperCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=None ) ->Tuple:
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : int=None , __UpperCamelCase : int=None , **__UpperCamelCase : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
_UpperCAmelCase = src_lang
if tgt_lang is not None:
_UpperCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCAmelCase = kwargs.get("""task""" , self.task )
_UpperCAmelCase = task.split("""_""" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
_UpperCAmelCase = items[1]
_UpperCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->int:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase ) | 19 | 0 |
"""simple docstring"""
import warnings
from ...utils import logging
from .image_processing_clip import CLIPImageProcessor
a : int = logging.get_logger(__name__)
class a_ ( __UpperCAmelCase ):
def __init__( self : Optional[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Dict ) ->Optional[int]:
warnings.warn(
"""The class CLIPFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use CLIPImageProcessor instead.""" , UpperCAmelCase_ , )
super().__init__(*UpperCAmelCase_ , **UpperCAmelCase_ ) | 710 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
_UpperCAmelCase = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__UpperCamelCase )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 19 | 0 |
"""simple docstring"""
from typing import Dict
from transformers import EvalPrediction, HfArgumentParser, TrainingArguments, is_torch_available
from transformers.testing_utils import (
TestCasePlus,
execute_subprocess_async,
get_torch_dist_unique_port,
require_torch_multi_gpu,
require_torch_neuroncore,
)
from transformers.training_args import ParallelMode
from transformers.utils import logging
a : Union[str, Any] = logging.get_logger(__name__)
if is_torch_available():
import torch
from torch import nn
from torch.utils.data import Dataset
from transformers import Trainer
class a_ ( UpperCamelCase_ ):
def __init__( self : Union[str, Any] , __UpperCamelCase : int = 1_01 ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = length
def __len__( self : Dict ) ->Dict:
'''simple docstring'''
return self.length
def __getitem__( self : int , __UpperCamelCase : str ) ->Optional[int]:
'''simple docstring'''
return i
class a_ :
def __call__( self : Dict , __UpperCamelCase : List[Any] ) ->List[Any]:
'''simple docstring'''
return {"input_ids": torch.tensor(UpperCamelCase__ ), "labels": torch.tensor(UpperCamelCase__ )}
class a_ ( nn.Module ):
def __init__( self : List[str] ) ->List[Any]:
'''simple docstring'''
super().__init__()
# Add some (unused) params otherwise DDP will complain.
_UpperCAmelCase = nn.Linear(1_20 , 80 )
def _snake_case ( self : List[Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Any=None ) ->Tuple:
'''simple docstring'''
if labels is not None:
return torch.tensor(0.0 , device=input_ids.device ), input_ids
else:
return input_ids
class a_ ( UpperCamelCase_ ):
@require_torch_neuroncore
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = f"""--nproc_per_node=2
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f"""--output_dir {output_dir}""".split()
_UpperCAmelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
class a_ ( UpperCamelCase_ ):
@require_torch_multi_gpu
def _snake_case ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = f"""--nproc_per_node={torch.cuda.device_count()}
--master_port={get_torch_dist_unique_port()}
{self.test_file_dir}/test_trainer_distributed.py
""".split()
_UpperCAmelCase = self.get_auto_remove_tmp_dir()
_UpperCAmelCase = f"""--output_dir {output_dir}""".split()
_UpperCAmelCase = ['''torchrun'''] + distributed_args + args
execute_subprocess_async(UpperCamelCase__ , env=self.get_env() )
# successful return here == success - any errors would have caused an error in the sub-call
if __name__ == "__main__":
# The script below is meant to be run under torch.distributed, on a machine with multiple GPUs:
#
# PYTHONPATH="src" python -m torch.distributed.run --nproc_per_node 2 --output_dir output_dir ./tests/test_trainer_distributed.py
a : Any = HfArgumentParser((TrainingArguments,))
a : Tuple = parser.parse_args_into_dataclasses()[0]
logger.warning(
F"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
F"distributed training: {training_args.parallel_mode != ParallelMode.NOT_DISTRIBUTED}"
)
# Essentially, what we want to verify in the distributed case is that we get all samples back,
# in the right order. (this is crucial for prediction for instance)
for dataset_length in [1_0_1, 4_0, 7]:
a : List[str] = DummyDataset(dataset_length)
def _UpperCamelCase ( _A ) -> str:
_UpperCAmelCase = list(range(len(_lowercase ) ) )
_UpperCAmelCase = p.predictions.tolist() == sequential and p.label_ids.tolist() == sequential
if not success and training_args.local_rank == 0:
logger.warning(
"""Predictions and/or labels do not match expected results:\n - predictions: """
F"""{p.predictions.tolist()}\n - labels: {p.label_ids.tolist()}\n - expected: {sequential}""" )
return {"success": success}
a : Any = Trainer(
model=DummyModel(),
args=training_args,
data_collator=DummyDataCollator(),
eval_dataset=dataset,
compute_metrics=compute_metrics,
)
a : str = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a : Tuple = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a : Any = 2
a : Any = trainer.evaluate()
logger.info(metrics)
if metrics["eval_success"] is not True:
logger.error(metrics)
exit(1)
a : Optional[int] = trainer.predict(dataset)
logger.info(p.metrics)
if p.metrics["test_success"] is not True:
logger.error(p.metrics)
exit(1)
a : List[str] = None | 711 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a : str = True
except (ImportError, ModuleNotFoundError):
a : List[str] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
re.sub("""<n>""" , """""" , _A ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_A ) ) | 19 | 0 |
"""simple docstring"""
from collections import namedtuple
import requests
from lxml import html # type: ignore
a : List[Any] = namedtuple('''covid_data''', '''cases deaths recovered''')
def _UpperCamelCase ( _A = "https://www.worldometers.info/coronavirus/" ) -> covid_data:
"""simple docstring"""
_UpperCAmelCase = """//div[@class = \"maincounter-number\"]/span/text()"""
return covid_data(*html.fromstring(requests.get(__UpperCamelCase ).content ).xpath(__UpperCamelCase ) )
a : Any = '''Total COVID-19 cases in the world: {}\nTotal deaths due to COVID-19 in the world: {}\nTotal COVID-19 patients recovered in the world: {}'''
print(fmt.format(*covid_stats())) | 712 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a_ :
a : List[Any] = PegasusConfig
a : Dict = {}
a : List[Any] = 'gelu'
def __init__( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any=False , __UpperCamelCase : Any=99 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[Any]=20 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Tuple=0 , ) ->int:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _snake_case ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _UpperCamelCase ( _A , _A , _A , _A=None , _A=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(_A , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a : Any = True
a : int = False
a : Union[str, Any] = False
a : Optional[int] = False
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : List[Any] , __UpperCamelCase : str=None , **__UpperCamelCase : int ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=5_12 , padding=__UpperCamelCase )
_UpperCAmelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded | 19 | 0 |
"""simple docstring"""
import copy
import fnmatch
import json
import os
import pickle as pkl
import shutil
import sys
import tarfile
import tempfile
from collections import OrderedDict
from contextlib import contextmanager
from functools import partial
from hashlib import sha256
from io import BytesIO
from pathlib import Path
from urllib.parse import urlparse
from zipfile import ZipFile, is_zipfile
import cv2
import numpy as np
import requests
import wget
from filelock import FileLock
from PIL import Image
from tqdm.auto import tqdm
from yaml import Loader, dump, load
try:
    import torch

    _torch_available = True
except ImportError:
    _torch_available = False

try:
    from torch.hub import _get_torch_home

    torch_cache_home = _get_torch_home()
except ImportError:
    torch_cache_home = os.path.expanduser(
        os.getenv("TORCH_HOME", os.path.join(os.getenv("XDG_CACHE_HOME", "~/.cache"), "torch"))
    )
default_cache_path = os.path.join(torch_cache_home, "transformers")

CLOUDFRONT_DISTRIB_PREFIX = "https://cdn.huggingface.co"
S3_BUCKET_PREFIX = "https://s3.amazonaws.com/models.huggingface.co/bert"
PATH = "/".join(str(Path(__file__).resolve()).split("/")[:-1])
CONFIG = os.path.join(PATH, "config.yaml")
ATTRIBUTES = os.path.join(PATH, "attributes.txt")
OBJECTS = os.path.join(PATH, "objects.txt")
PYTORCH_PRETRAINED_BERT_CACHE = os.getenv("PYTORCH_PRETRAINED_BERT_CACHE", default_cache_path)
PYTORCH_TRANSFORMERS_CACHE = os.getenv("PYTORCH_TRANSFORMERS_CACHE", PYTORCH_PRETRAINED_BERT_CACHE)
TRANSFORMERS_CACHE = os.getenv("TRANSFORMERS_CACHE", PYTORCH_TRANSFORMERS_CACHE)
WEIGHTS_NAME = "pytorch_model.bin"
CONFIG_NAME = "config.yaml"
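# Cache resolution sketch: TRANSFORMERS_CACHE falls back to
# PYTORCH_TRANSFORMERS_CACHE, then to PYTORCH_PRETRAINED_BERT_CACHE, and
# finally to <torch_cache_home>/transformers. A hypothetical override
# (the script name is illustrative):
#
#   TRANSFORMERS_CACHE=/data/hf_cache python demo.py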
def load_labels(objs=OBJECTS, attrs=ATTRIBUTES):
    """Load the Visual Genome object and attribute class names from disk."""
    vg_classes = []
    with open(objs) as f:
        for line in f.readlines():
            vg_classes.append(line.split(",")[0].lower().strip())

    vg_attrs = []
    with open(attrs) as f:
        for line in f.readlines():
            vg_attrs.append(line.split(",")[0].lower().strip())
    return vg_classes, vg_attrs
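# Usage sketch (assumes the comma-separated objects.txt/attributes.txt files
# exist next to this module, as the OBJECTS/ATTRIBUTES defaults expect):
#
#   vg_classes, vg_attrs = load_labels()
#   print(len(vg_classes), vg_classes[:3])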
def load_checkpoint(ckp_path):
    """Load a pickled detectron-style checkpoint, converting numpy arrays to torch tensors."""
    r = OrderedDict()
    with open(ckp_path, "rb") as f:
        ckp = pkl.load(f)["model"]
    for k in copy.deepcopy(list(ckp.keys())):
        v = ckp.pop(k)
        if isinstance(v, np.ndarray):
            v = torch.tensor(v)
        else:
            # isinstance needs the torch.Tensor class, not the torch.tensor factory
            assert isinstance(v, torch.Tensor), type(v)
        r[k] = v
    return r
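# Usage sketch (the path and model name are hypothetical; any pickle whose
# "model" entry maps parameter names to numpy arrays or torch tensors will do):
#
#   state_dict = load_checkpoint("frcnn_vg.pkl")
#   frcnn.load_state_dict(state_dict, strict=False)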
class Config:
    _pointer = {}

    def __init__(self, dictionary: dict, name: str = "root", level=0):
        self._name = name
        self._level = level
        d = {}
        for k, v in dictionary.items():
            if v is None:
                raise ValueError()
            k = copy.deepcopy(k)
            v = copy.deepcopy(v)
            if isinstance(v, dict):
                # nested dicts become nested Config objects, one level deeper
                v = Config(v, name=k, level=level + 1)
            d[k] = v
            setattr(self, k, v)

        self._pointer = d
def __repr__( self : Any ) ->int:
'''simple docstring'''
return str(list((self._pointer.keys()) ) )
def __setattr__( self : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = val
_UpperCAmelCase = val
_UpperCAmelCase = key.split(""".""" )
_UpperCAmelCase = len(__UpperCamelCase ) - 1
_UpperCAmelCase = self._pointer
if len(__UpperCamelCase ) > 1:
for i, l in enumerate(__UpperCamelCase ):
if hasattr(self , __UpperCamelCase ) and isinstance(getattr(self , __UpperCamelCase ) , __UpperCamelCase ):
setattr(getattr(self , __UpperCamelCase ) , """.""".join(levels[i:] ) , __UpperCamelCase )
if l == last_level:
_UpperCAmelCase = val
else:
_UpperCAmelCase = pointer[l]
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
return self._pointer
def _snake_case ( self : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ) ->List[Any]:
'''simple docstring'''
with open(f"""{file_name}""" , """w""" ) as stream:
dump(__UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[Any] ) ->Optional[Any]:
'''simple docstring'''
with open(f"""{file_name}""" , """w""" ) as stream:
json.dump(__UpperCamelCase , __UpperCamelCase )
@staticmethod
def _snake_case ( __UpperCamelCase : Dict ) ->Union[str, Any]:
'''simple docstring'''
with open(__UpperCamelCase ) as stream:
_UpperCAmelCase = load(__UpperCamelCase , Loader=__UpperCamelCase )
return data
def __str__( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = """ """
if self._name != "root":
_UpperCAmelCase = f"""{t * (self._level-1)}{self._name}:\n"""
else:
_UpperCAmelCase = """"""
_UpperCAmelCase = self._level
for i, (k, v) in enumerate(self._pointer.items() ):
if isinstance(__UpperCamelCase , __UpperCamelCase ):
r += f"""{t * (self._level)}{v}\n"""
self._level += 1
else:
r += f"""{t * (self._level)}{k}: {v} ({type(__UpperCamelCase ).__name__})\n"""
_UpperCAmelCase = level
return r[:-1]
@classmethod
def _snake_case ( cls : List[Any] , __UpperCamelCase : str , **__UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
return cls(__UpperCamelCase )
@classmethod
def _snake_case ( cls : Tuple , __UpperCamelCase : str , **__UpperCamelCase : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = kwargs.pop("""cache_dir""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""force_download""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""resume_download""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""proxies""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""local_files_only""" , __UpperCamelCase )
if os.path.isdir(__UpperCamelCase ):
_UpperCAmelCase = os.path.join(__UpperCamelCase , __UpperCamelCase )
elif os.path.isfile(__UpperCamelCase ) or is_remote_url(__UpperCamelCase ):
_UpperCAmelCase = pretrained_model_name_or_path
else:
_UpperCAmelCase = hf_bucket_url(__UpperCamelCase , filename=__UpperCamelCase , use_cdn=__UpperCamelCase )
try:
# Load from URL or cache if already cached
_UpperCAmelCase = cached_path(
__UpperCamelCase , cache_dir=__UpperCamelCase , force_download=__UpperCamelCase , proxies=__UpperCamelCase , resume_download=__UpperCamelCase , local_files_only=__UpperCamelCase , )
# Load config dict
if resolved_config_file is None:
raise EnvironmentError
_UpperCAmelCase = Config.load_yaml(__UpperCamelCase )
except EnvironmentError:
_UpperCAmelCase = """Can't load config for"""
raise EnvironmentError(__UpperCamelCase )
if resolved_config_file == config_file:
print("""loading configuration file from path""" )
else:
print("""loading configuration file cache""" )
return Config.load_yaml(__UpperCamelCase ), kwargs
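

def _demo_config_usage():
    # Hedged usage sketch (not part of the original module); the nested keys are
    # made-up examples. Plain dicts become nested Config objects with attribute access.
    cfg = Config({"model": {"hidden_size": 768}}, name="demo")
    assert cfg.model.hidden_size == 768
    assert "model" in cfg.to_dict()
    return cfg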
def compare(in_tensor):
    out_tensor = torch.load("dump.pt", map_location=in_tensor.device)
    n1 = in_tensor.numpy()
    n2 = out_tensor.numpy()[0]
    print(n1.shape, n1[0, 0, :5])
    print(n2.shape, n2[0, 0, :5])
    assert np.allclose(n1, n2, rtol=0.01, atol=0.1), (
        f"{sum([1 for x in np.isclose(n1, n2, rtol=0.01, atol=0.1).flatten() if x is False])/len(n1.flatten())*100:.4f} %"
        " element-wise mismatch"
    )
    raise Exception("tensors are all good")
# Hugging face functions below
def is_remote_url(url_or_filename):
    parsed = urlparse(url_or_filename)
    return parsed.scheme in ("http", "https")
def hf_bucket_url(model_id, filename, use_cdn=True) -> str:
    endpoint = CLOUDFRONT_DISTRIB_PREFIX if use_cdn else S3_BUCKET_PREFIX
    legacy_format = "/" not in model_id
    if legacy_format:
        return f"{endpoint}/{model_id}-{filename}"
    else:
        return f"{endpoint}/{model_id}/{filename}"
def http_get(url, temp_file, proxies=None, resume_size=0, user_agent=None):
    ua = "python/{}".format(sys.version.split()[0])
    if _torch_available:
        ua += "; torch/{}".format(torch.__version__)
    if isinstance(user_agent, dict):
        ua += "; " + "; ".join("{}/{}".format(k, v) for k, v in user_agent.items())
    elif isinstance(user_agent, str):
        ua += "; " + user_agent
    headers = {"user-agent": ua}
    if resume_size > 0:
        headers["Range"] = "bytes=%d-" % (resume_size,)
    response = requests.get(url, stream=True, proxies=proxies, headers=headers)
    if response.status_code == 416:  # Range not satisfiable
        return
    content_length = response.headers.get("Content-Length")
    total = resume_size + int(content_length) if content_length is not None else None
    progress = tqdm(
        unit="B",
        unit_scale=True,
        total=total,
        initial=resume_size,
        desc="Downloading",
    )
    for chunk in response.iter_content(chunk_size=1024):
        if chunk:  # filter out keep-alive new chunks
            progress.update(len(chunk))
            temp_file.write(chunk)
    progress.close()
def get_from_cache(
    url,
    cache_dir=None,
    force_download=False,
    proxies=None,
    etag_timeout=10,
    resume_download=False,
    user_agent=None,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    os.makedirs(cache_dir, exist_ok=True)

    etag = None
    if not local_files_only:
        try:
            response = requests.head(url, allow_redirects=True, proxies=proxies, timeout=etag_timeout)
            if response.status_code == 200:
                etag = response.headers.get("ETag")
        except (EnvironmentError, requests.exceptions.Timeout):
            # etag is already None
            pass

    filename = url_to_filename(url, etag)

    # get cache path to put the file
    cache_path = os.path.join(cache_dir, filename)

    # etag is None = we don't have a connection, or url doesn't exist, or is otherwise inaccessible.
    # try to get the last downloaded one
    if etag is None:
        if os.path.exists(cache_path):
            return cache_path
        else:
            matching_files = [
                file
                for file in fnmatch.filter(os.listdir(cache_dir), filename + ".*")
                if not file.endswith(".json") and not file.endswith(".lock")
            ]
            if len(matching_files) > 0:
                return os.path.join(cache_dir, matching_files[-1])
            else:
                # If files cannot be found and local_files_only=True,
                # the models might've been found if local_files_only=False
                # Notify the user about that
                if local_files_only:
                    raise ValueError(
                        "Cannot find the requested files in the cached path and outgoing traffic has been"
                        " disabled. To enable model look-ups and downloads online, set 'local_files_only'"
                        " to False."
                    )
                return None

    # From now on, etag is not None.
    if os.path.exists(cache_path) and not force_download:
        return cache_path

    # Prevent parallel downloads of the same file with a lock.
    lock_path = cache_path + ".lock"
    with FileLock(lock_path):
        # If the download just completed while the lock was activated.
        if os.path.exists(cache_path) and not force_download:
            # Even if returning early like here, the lock will be released.
            return cache_path

        if resume_download:
            incomplete_path = cache_path + ".incomplete"

            @contextmanager
            def _resumable_file_manager():
                with open(incomplete_path, "a+b") as f:
                    yield f

            temp_file_manager = _resumable_file_manager
            if os.path.exists(incomplete_path):
                resume_size = os.stat(incomplete_path).st_size
            else:
                resume_size = 0
        else:
            temp_file_manager = partial(tempfile.NamedTemporaryFile, dir=cache_dir, delete=False)
            resume_size = 0

        # Download to temporary file, then copy to cache dir once finished.
        # Otherwise you get corrupt cache entries if the download gets interrupted.
        with temp_file_manager() as temp_file:
            print(
                "%s not found in cache or force_download set to True, downloading to %s",
                url,
                temp_file.name,
            )

            http_get(
                url,
                temp_file,
                proxies=proxies,
                resume_size=resume_size,
                user_agent=user_agent,
            )

        os.replace(temp_file.name, cache_path)

        meta = {"url": url, "etag": etag}
        meta_path = cache_path + ".json"
        with open(meta_path, "w") as meta_file:
            json.dump(meta, meta_file)

    return cache_path
def url_to_filename(url, etag=None):
    url_bytes = url.encode("utf-8")
    url_hash = sha256(url_bytes)
    filename = url_hash.hexdigest()

    if etag:
        etag_bytes = etag.encode("utf-8")
        etag_hash = sha256(etag_bytes)
        filename += "." + etag_hash.hexdigest()

    if url.endswith(".h5"):
        filename += ".h5"

    return filename
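

def _demo_url_to_filename():
    # Hedged sketch (not part of the original module): the cache filename is the
    # sha256 hex digest of the URL, extended with a digest of the ETag when known.
    name = url_to_filename("https://example.com/model.bin", etag='"abc123"')
    assert len(name.split(".")[0]) == 64  # sha256 hex digest of the URL
    assert "." in name  # the ETag contributes a second digest component
    return name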
def cached_path(
    url_or_filename,
    cache_dir=None,
    force_download=False,
    proxies=None,
    resume_download=False,
    user_agent=None,
    extract_compressed_file=False,
    force_extract=False,
    local_files_only=False,
):
    if cache_dir is None:
        cache_dir = TRANSFORMERS_CACHE
    if isinstance(url_or_filename, Path):
        url_or_filename = str(url_or_filename)
    if isinstance(cache_dir, Path):
        cache_dir = str(cache_dir)

    if is_remote_url(url_or_filename):
        # URL, so get it from the cache (downloading if necessary)
        output_path = get_from_cache(
            url_or_filename,
            cache_dir=cache_dir,
            force_download=force_download,
            proxies=proxies,
            resume_download=resume_download,
            user_agent=user_agent,
            local_files_only=local_files_only,
        )
    elif os.path.exists(url_or_filename):
        # File, and it exists.
        output_path = url_or_filename
    elif urlparse(url_or_filename).scheme == "":
        # File, but it doesn't exist.
        raise EnvironmentError("file {} not found".format(url_or_filename))
    else:
        # Something unknown
        raise ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))

    if extract_compressed_file:
        if not is_zipfile(output_path) and not tarfile.is_tarfile(output_path):
            return output_path

        # Path where we extract compressed archives
        # We avoid '.' in dir name and add "-extracted" at the end: "./model.zip" => "./model-zip-extracted/"
        output_dir, output_file = os.path.split(output_path)
        output_extract_dir_name = output_file.replace(".", "-") + "-extracted"
        output_path_extracted = os.path.join(output_dir, output_extract_dir_name)

        if os.path.isdir(output_path_extracted) and os.listdir(output_path_extracted) and not force_extract:
            return output_path_extracted

        # Prevent parallel extractions
        lock_path = output_path + ".lock"
        with FileLock(lock_path):
            shutil.rmtree(output_path_extracted, ignore_errors=True)
            os.makedirs(output_path_extracted)
            if is_zipfile(output_path):
                with ZipFile(output_path, "r") as zip_file:
                    zip_file.extractall(output_path_extracted)
                    zip_file.close()
            elif tarfile.is_tarfile(output_path):
                tar_file = tarfile.open(output_path)
                tar_file.extractall(output_path_extracted)
                tar_file.close()
            else:
                raise EnvironmentError("Archive format of {} could not be identified".format(output_path))

        return output_path_extracted

    return output_path
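

def _demo_cached_path():
    # Hedged usage sketch (not part of the original module); the URL below is an
    # illustrative placeholder -- any reachable file URL behaves the same way.
    url = f"{S3_BUCKET_PREFIX}/bert-base-uncased-config.json"
    local_path = cached_path(url)  # downloads on the first call, then hits the cache
    return local_path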
def _UpperCamelCase ( _A , _A="," ) -> Optional[int]:
"""simple docstring"""
assert isinstance(_A , _A )
if os.path.isfile(_A ):
with open(_A ) as f:
_UpperCAmelCase = eval(f.read() )
else:
_UpperCAmelCase = requests.get(_A )
try:
_UpperCAmelCase = requests.json()
except Exception:
_UpperCAmelCase = req.content.decode()
assert data is not None, "could not connect"
try:
_UpperCAmelCase = eval(_A )
except Exception:
_UpperCAmelCase = data.split("""\n""" )
req.close()
return data
def get_image_from_url(url):
    response = requests.get(url)
    img = np.array(Image.open(BytesIO(response.content)))
    return img
def load_frcnn_pkl_from_url(url):
    fn = url.split("/")[-1]
    if fn not in os.listdir(os.getcwd()):
        wget.download(url)
    with open(fn, "rb") as stream:
        weights = pkl.load(stream)
    model = weights.pop("model")
    new = {}
    for k, v in model.items():
        new[k] = torch.from_numpy(v)
        if "running_var" in k:
            zero = torch.tensor([0])
            k2 = k.replace("running_var", "num_batches_tracked")
            new[k2] = zero
    return new
def get_demo_path():
    print(f"{os.path.abspath(os.path.join(PATH, os.pardir))}/demo.ipynb")
def _UpperCamelCase ( _A , _A="RGB" ) -> Any:
"""simple docstring"""
assert isinstance(_A , _A )
if os.path.isfile(_A ):
_UpperCAmelCase = cva.imread(_A )
else:
_UpperCAmelCase = get_image_from_url(_A )
assert img is not None, F"""could not connect to: {im}"""
_UpperCAmelCase = cva.cvtColor(_A , cva.COLOR_BGR2RGB )
if input_format == "RGB":
_UpperCAmelCase = img[:, :, ::-1]
return img
def chunk(images, batch=1):
    return (images[i : i + batch] for i in range(0, len(images), batch))
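

def _demo_chunk():
    # Hedged sketch (not part of the original module): `chunk` lazily yields
    # fixed-size batches; plain ints stand in for images here.
    batches = list(chunk(list(range(5)), batch=2))
    assert batches == [[0, 1], [2, 3], [4]]
    return batches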
"""simple docstring"""
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    def __init__(
        self,
        parent,
        vocab_size=99,
        batch_size=13,
        encoder_seq_length=7,
        decoder_seq_length=9,
        is_training=True,
        use_attention_mask=True,
        use_labels=False,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        d_ff=37,
        relative_attention_num_buckets=8,
        dropout_rate=0.1,
        initializer_factor=0.002,
        eos_token_id=1,
        pad_token_id=0,
        decoder_start_token_id=0,
        scope=None,
        decoder_layers=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
    def prepare_inputs_dict(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask=None,
        decoder_attention_mask=None,
        head_mask=None,
        decoder_head_mask=None,
        cross_attn_head_mask=None,
    ):
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(
                config.num_decoder_layers, config.num_attention_heads, device=torch_device
            )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.encoder_seq_length], self.vocab_size)
        decoder_input_ids = ids_tensor([self.batch_size, self.decoder_seq_length], self.vocab_size)
        # we need to clamp the input ids here to avoid having pad token in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and rest are between 2..seq_length
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in incorrect seq_lenth and which in turn results in
        # position_ids being off by num_pad_tokens in past input
        input_ids = input_ids.clamp(self.pad_token_id + 1)
        decoder_input_ids = decoder_input_ids.clamp(self.pad_token_id + 1)

        config = self.get_config()
        config.encoder_attention_heads = config.num_attention_heads
        input_dict = self.prepare_inputs_dict(config, input_ids, decoder_input_ids)
        return config, input_dict
    def prepare_config_and_inputs_for_common(self):
        config, inputs_dict = self.prepare_config_and_inputs()
        return config, inputs_dict
    def get_pipeline_config(self):
        return T5Config(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def get_config(self):
        return T5Config(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
    def create_and_check_model(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids=input_ids,
            decoder_input_ids=decoder_input_ids,
            attention_mask=attention_mask,
            decoder_attention_mask=decoder_attention_mask,
        )
        result = model(input_ids=input_ids, decoder_input_ids=decoder_input_ids)
        decoder_output = result.last_hidden_state
        decoder_past = result.past_key_values
        encoder_output = result.encoder_last_hidden_state
        self.parent.assertEqual(encoder_output.size(), (self.batch_size, self.encoder_seq_length, self.hidden_size))
        self.parent.assertEqual(decoder_output.size(), (self.batch_size, self.decoder_seq_length, self.hidden_size))
        # There should be `num_layers` key value embeddings stored in decoder_past
        self.parent.assertEqual(len(decoder_past), config.num_layers)
        # There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
        self.parent.assertEqual(len(decoder_past[0]), 4)
    def create_and_check_decoder_model_past(
        self,
        config,
        input_ids,
        decoder_input_ids,
        attention_mask,
        decoder_attention_mask,
        lm_labels,
    ):
        model = UMT5Model(config=config).get_decoder().to(torch_device).eval()
        # first forward pass
        outputs = model(input_ids, use_cache=True)
        outputs_use_cache_conf = model(input_ids)
        outputs_no_past = model(input_ids, use_cache=False)

        self.parent.assertTrue(len(outputs) == len(outputs_use_cache_conf))
        self.parent.assertTrue(len(outputs) == len(outputs_no_past) + 1)

        output, past_key_values = outputs.to_tuple()

        # create hypothetical next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 1), config.vocab_size)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)

        output_from_no_past = model(next_input_ids)["last_hidden_state"]
        output_from_past = model(next_tokens, past_key_values=past_key_values)["last_hidden_state"]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -1, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, 0, random_slice_idx].detach()

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_past_slice, output_from_no_past_slice, atol=1e-3))
    def create_and_check_model_fp16_forward(
        self,
        config,
        input_dict,
    ):
        model = UMT5Model(config=config).to(torch_device).half().eval()
        output = model(**input_dict)["last_hidden_state"]
        self.parent.assertFalse(torch.isnan(output).any().item())
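

def _demo_umt5_model_tester():
    # Hedged usage sketch (not part of the original tests): the tester builds a
    # tiny T5-style config plus random inputs; a bare TestCase serves as parent.
    tester = UMT5ModelTester(unittest.TestCase())
    config, input_dict = tester.prepare_config_and_inputs()
    assert input_dict["input_ids"].shape == (tester.batch_size, tester.encoder_seq_length)
    return config, input_dict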
@require_torch
class UMT5ModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (UMT5Model, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering) if is_torch_available() else ()
    )
    all_generative_model_classes = (UMT5ForConditionalGeneration,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "conversational": UMT5ForConditionalGeneration,
            "feature-extraction": UMT5Model,
            "summarization": UMT5ForConditionalGeneration,
            "text2text-generation": UMT5ForConditionalGeneration,
            "translation": UMT5ForConditionalGeneration,
            "question-answering": UMT5ForQuestionAnswering,
        }
        if is_torch_available()
        else {}
    )
    is_encoder_decoder = True
    fx_compatible = False
    test_pruning = False
    test_missing_keys = True
    test_torchscript = True
    # The small UMT5 model needs higher percentages for CPU/MP tests
    model_split_percents = [0.8, 0.9]
    def setUp(self):
        self.model_tester = UMT5ModelTester(self)
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
    def test_generate_with_head_masking(self):
        attention_names = ["encoder_attentions", "decoder_attentions", "cross_attentions"]
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        config = config_and_inputs[0]
        model = UMT5ForConditionalGeneration(config).eval()
        model.to(torch_device)

        head_masking = {
            "head_mask": torch.zeros(config.num_layers, config.num_heads, device=torch_device),
            "decoder_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
            "cross_attn_head_mask": torch.zeros(config.num_decoder_layers, config.num_heads, device=torch_device),
        }

        for attn_name, (name, mask) in zip(attention_names, head_masking.items()):
            head_masks = {name: mask}
            # Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
            if name == "head_mask":
                head_masks["decoder_head_mask"] = torch.ones(
                    config.num_decoder_layers, config.num_heads, device=torch_device
                )

            out = model.generate(
                config_and_inputs[1]["input_ids"],
                num_beams=1,
                max_length=3,
                output_attentions=True,
                return_dict_in_generate=True,
                **head_masks,
            )
            # We check the state of decoder_attentions and cross_attentions just from the last step
            attn_weights = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
            self.assertEqual(sum([w.sum().item() for w in attn_weights]), 0.0)
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class Umt5IntegrationTest(unittest.TestCase):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
    def test_small_integration_test(self):
        model = UMT5ForConditionalGeneration.from_pretrained("google/umt5-small", return_dict=True).to(torch_device)
        tokenizer = AutoTokenizer.from_pretrained("google/umt5-small", use_fast=False, legacy=False)
        input_text = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
        EXPECTED_IDS = torch.tensor(
[
                [38530, 210703, 256299, 1410, 256298, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [826, 321, 671, 25922, 256299, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [1460, 339, 312, 19014, 10620, 758, 256299, 2355, 274, 1, 0, 0, 0, 0, 0, 0, 0, 0],
                [517, 256299, 14869, 281, 301, 256298, 275, 119983, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
                [320, 256299, 14869, 281, 2234, 289, 2275, 333, 61391, 289, 256298, 543, 256297, 168714, 329, 256296, 274, 1],
] )
# fmt: on
        torch.testing.assert_allclose(input_ids, EXPECTED_IDS)

        generated_ids = model.generate(input_ids.to(torch_device))
        EXPECTED_FILLING = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
        filling = tokenizer.batch_decode(generated_ids)
        self.assertEqual(filling, EXPECTED_FILLING)
"""simple docstring"""
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxSeq2SeqConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

UMT5_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/umt5-small": "https://huggingface.co/google/umt5-small/resolve/main/config.json",
    # See all umt5 models at https://huggingface.co/models?filter=umt5
}
class UMT5Config(PretrainedConfig):
    model_type = "umt5"
    keys_to_ignore_at_inference = ["past_key_values"]

    def __init__(
        self,
        vocab_size=250112,
        d_model=512,
        d_kv=64,
        d_ff=1024,
        num_layers=8,
        num_decoder_layers=None,
        num_heads=6,
        relative_attention_num_buckets=32,
        relative_attention_max_distance=128,
        dropout_rate=0.1,
        layer_norm_epsilon=1e-6,
        initializer_factor=1.0,
        feed_forward_proj="gated-gelu",
        is_encoder_decoder=True,
        use_cache=True,
        tokenizer_class="T5Tokenizer",
        tie_word_embeddings=True,
        pad_token_id=0,
        eos_token_id=1,
        decoder_start_token_id=0,
        **kwargs,
    ):
        super().__init__(
            is_encoder_decoder=is_encoder_decoder,
            tokenizer_class=tokenizer_class,
            tie_word_embeddings=tie_word_embeddings,
            pad_token_id=pad_token_id,
            eos_token_id=eos_token_id,
            decoder_start_token_id=decoder_start_token_id,
            **kwargs,
        )
        self.vocab_size = vocab_size
        self.d_model = d_model
        self.d_kv = d_kv
        self.d_ff = d_ff
        self.num_layers = num_layers
        self.num_decoder_layers = (
            num_decoder_layers if num_decoder_layers is not None else self.num_layers
        )  # default = symmetry
        self.num_heads = num_heads
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.relative_attention_max_distance = relative_attention_max_distance
        self.dropout_rate = dropout_rate
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_factor = initializer_factor
        self.feed_forward_proj = feed_forward_proj
        self.use_cache = use_cache

        act_info = self.feed_forward_proj.split("-")
        self.dense_act_fn = act_info[-1]
        self.is_gated_act = act_info[0] == "gated"

        if len(act_info) > 1 and act_info[0] != "gated" or len(act_info) > 2:
            raise ValueError(
                f"`feed_forward_proj`: {feed_forward_proj} is not a valid activation function of the dense layer."
                "Please make sure `feed_forward_proj` is of the format `gated-{ACT_FN}` or `{ACT_FN}`, e.g. "
                "'gated-gelu' or 'relu'"
            )

        if feed_forward_proj == "gated-gelu":
            self.dense_act_fn = "gelu_new"

    @property
    def hidden_size(self):
        return self.d_model

    @property
    def num_attention_heads(self):
        return self.num_heads

    @property
    def num_hidden_layers(self):
        return self.num_layers


class UMT5OnnxConfig(OnnxSeq2SeqConfigWithPast):
    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.inputs
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = {
            "input_ids": {0: "batch", 1: "encoder_sequence"},
            "attention_mask": {0: "batch", 1: "encoder_sequence"},
        }
        if self.use_past:
            common_inputs["attention_mask"][1] = "past_encoder_sequence + sequence"
            common_inputs["decoder_input_ids"] = {0: "batch"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "past_decoder_sequence + sequence"}
        else:
            common_inputs["decoder_input_ids"] = {0: "batch", 1: "decoder_sequence"}
            common_inputs["decoder_attention_mask"] = {0: "batch", 1: "decoder_sequence"}

        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")

        return common_inputs

    @property
    # Copied from transformers.models.t5.configuration_t5.T5OnnxConfig.default_onnx_opset
    def default_onnx_opset(self) -> int:
        return 13

    @property
    def atol_for_validation(self) -> float:
        return 5e-4
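

def _demo_umt5_config():
    # Hedged usage sketch (not part of the original file): building a small UMT5
    # config and reading the aliased properties defined above.
    config = UMT5Config(d_model=64, num_heads=4, num_layers=2)
    assert config.hidden_size == 64
    assert config.num_attention_heads == 4
    assert config.num_hidden_layers == 2
    return config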
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            arr = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            out = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks: int):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value
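

def _demo_helpers():
    # Hedged sketch (not part of the original tests) for the two helpers above:
    # get_base_dtype unwraps nested list types; change_first_primitive_element_in_list
    # rewrites the innermost first element in place.
    assert get_base_dtype(pa.list_(pa.list_(pa.int64()))) == pa.int64()
    nested = [[1, 2], [3]]
    change_first_primitive_element_in_list(nested, 99)
    assert nested == [[99, 2], [3]]
    return nested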
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)
def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None
def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
"""simple docstring"""
import inspect
import unittest
from datasets import load_dataset
from packaging import version
from transformers import BeitConfig
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import (
MODEL_MAPPING,
BeitForImageClassification,
BeitForMaskedImageModeling,
BeitForSemanticSegmentation,
BeitModel,
)
from transformers.models.beit.modeling_beit import BEIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
import PIL
from PIL import Image
from transformers import BeitImageProcessor
class BeitModelTester:
    def __init__(
        self,
        parent,
        vocab_size=100,
        batch_size=13,
        image_size=30,
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        out_indices=[0, 1, 2, 3],
    ):
        self.parent = parent
        self.vocab_size = 100
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.scope = scope
        self.out_indices = out_indices
        self.num_labels = num_labels

        # in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
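        # Worked example with the defaults above: image_size=30 and patch_size=2
        # give (30 // 2) ** 2 = 225 patches, so seq_length = 225 + 1 = 226 tokens.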
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()
return config, pixel_values, labels, pixel_labels
    def get_config(self):
        return BeitConfig(
            vocab_size=self.vocab_size,
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            out_indices=self.out_indices,
        )
    def create_and_check_model(self, config, pixel_values, labels, pixel_labels):
        model = BeitModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))
    def create_and_check_for_masked_lm(self, config, pixel_values, labels, pixel_labels):
        model = BeitForMaskedImageModeling(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length - 1, self.vocab_size))
    def create_and_check_for_image_classification(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.type_sequence_label_size
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))

        # test greyscale images
        config.num_channels = 1
        model = BeitForImageClassification(config)
        model.to(torch_device)
        model.eval()

        pixel_values = floats_tensor([self.batch_size, 1, self.image_size, self.image_size])
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.type_sequence_label_size))
    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels, pixel_labels):
        config.num_labels = self.num_labels
        model = BeitForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
        result = model(pixel_values, labels=pixel_labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size * 2, self.image_size * 2)
        )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class BeitModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (BeitModel, BeitForImageClassification, BeitForMaskedImageModeling, BeitForSemanticSegmentation)
        if is_torch_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": BeitModel,
            "image-classification": BeitForImageClassification,
            "image-segmentation": BeitForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = BeitModelTester(self)
        self.config_tester = ConfigTester(self, config_class=BeitConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()
@unittest.skip(reason="""BEiT does not use inputs_embeds""" )
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
pass
    @require_torch_multi_gpu
    @unittest.skip(reason="BEiT has some layers using `add_module` which doesn't work well with `nn.DataParallel`")
    def test_multi_gpu_data_parallel_forward(self):
        pass
    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))
    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_image_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        if not self.model_tester.is_training:
            return

        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True

        for model_class in self.all_model_classes:
            # we don't test BeitForMaskedImageModeling
            if model_class in [*get_values(MODEL_MAPPING), BeitForMaskedImageModeling]:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
if not self.model_tester.is_training:
return
_UpperCAmelCase = False
_UpperCAmelCase = True
for model_class in self.all_model_classes:
# we don't test BeitForMaskedImageModeling
if (
model_class in [*get_values(A_ ), BeitForMaskedImageModeling]
or not model_class.supports_gradient_checkpointing
):
continue
_UpperCAmelCase = model_class(A_ )
model.gradient_checkpointing_enable()
model.to(A_ )
model.train()
_UpperCAmelCase = self._prepare_for_class(A_ , A_ , return_labels=A_ )
_UpperCAmelCase = model(**A_ ).loss
loss.backward()
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(A_ )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=A_ )
for name, param in model.named_parameters():
# we skip lambda parameters as these require special initial values
# determined by config.layer_scale_init_value
if "lambda" in name:
continue
if param.requires_grad:
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@slow
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
for model_name in BEIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = BeitModel.from_pretrained(A_ )
self.assertIsNotNone(A_ )
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _snake_case ( self : Dict ) ->str:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = BeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" ).to(A_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A_ , return_tensors="""pt""" ).pixel_values.to(A_ )
# prepare bool_masked_pos
_UpperCAmelCase = torch.ones((1, 1_96) , dtype=torch.bool ).to(A_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(pixel_values=A_ , bool_masked_pos=A_ )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = torch.Size((1, 1_96, 81_92) )
self.assertEqual(logits.shape , A_ )
_UpperCAmelCase = torch.tensor(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] ).to(A_ )
self.assertTrue(torch.allclose(logits[bool_masked_pos][:3, :3] , A_ , atol=1e-2 ) )
@slow
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = BeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" ).to(A_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A_ , return_tensors="""pt""" ).to(A_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A_ )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = torch.Size((1, 10_00) )
self.assertEqual(logits.shape , A_ )
_UpperCAmelCase = torch.tensor([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
_UpperCAmelCase = 2_81
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def _snake_case ( self : List[str] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = BeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" ).to(
A_ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=A_ , return_tensors="""pt""" ).to(A_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A_ )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = torch.Size((1, 2_18_41) )
self.assertEqual(logits.shape , A_ )
_UpperCAmelCase = torch.tensor([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] ).to(A_ )
self.assertTrue(torch.allclose(logits[0, :3] , A_ , atol=1e-4 ) )
_UpperCAmelCase = 23_96
self.assertEqual(logits.argmax(-1 ).item() , A_ )
@slow
def _snake_case ( self : List[Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_UpperCAmelCase = model.to(A_ )
_UpperCAmelCase = BeitImageProcessor(do_resize=A_ , size=6_40 , do_center_crop=A_ )
_UpperCAmelCase = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
_UpperCAmelCase = Image.open(ds[0]["""file"""] )
_UpperCAmelCase = image_processor(images=A_ , return_tensors="""pt""" ).to(A_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A_ )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = torch.Size((1, 1_50, 1_60, 1_60) )
self.assertEqual(logits.shape , A_ )
_UpperCAmelCase = version.parse(PIL.__version__ ) < version.parse("""9.0.0""" )
if is_pillow_less_than_9:
_UpperCAmelCase = torch.tensor(
[
[[-4.9_2_2_5, -2.3_9_5_4, -3.0_5_2_2], [-2.8_8_2_2, -1.0_0_4_6, -1.7_5_6_1], [-2.9_5_4_9, -1.3_2_2_8, -2.1_3_4_7]],
[[-5.8_1_6_8, -3.4_1_2_9, -4.0_7_7_8], [-3.8_6_5_1, -2.2_2_1_4, -3.0_2_7_7], [-3.8_3_5_6, -2.4_6_4_3, -3.3_5_3_5]],
[[-0.0_0_7_8, 3.9_9_5_2, 4.0_7_5_4], [2.9_8_5_6, 4.6_9_4_4, 5.0_0_3_5], [3.2_4_1_3, 4.7_8_1_3, 4.9_9_6_9]],
] , device=A_ , )
else:
_UpperCAmelCase = torch.tensor(
[
[[-4.8_9_6_0, -2.3_6_8_8, -3.0_3_5_5], [-2.8_4_7_8, -0.9_8_3_6, -1.7_4_1_8], [-2.9_4_4_9, -1.3_3_3_2, -2.1_4_5_6]],
[[-5.8_0_8_1, -3.4_1_2_4, -4.1_0_0_6], [-3.8_5_6_1, -2.2_0_8_1, -3.0_3_2_3], [-3.8_3_6_5, -2.4_6_0_1, -3.3_6_6_9]],
[[-0.0_3_0_9, 3.9_8_6_8, 4.0_5_4_0], [2.9_6_4_0, 4.6_8_7_7, 4.9_9_7_6], [3.2_0_8_1, 4.7_6_9_0, 4.9_9_4_2]],
] , device=A_ , )
self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , A_ , atol=1e-4 ) )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = BeitForSemanticSegmentation.from_pretrained("""microsoft/beit-base-finetuned-ade-640-640""" )
_UpperCAmelCase = model.to(A_ )
_UpperCAmelCase = BeitImageProcessor(do_resize=A_ , size=6_40 , do_center_crop=A_ )
_UpperCAmelCase = load_dataset("""hf-internal-testing/fixtures_ade20k""" , split="""test""" )
_UpperCAmelCase = Image.open(ds[0]["""file"""] )
_UpperCAmelCase = image_processor(images=A_ , return_tensors="""pt""" ).to(A_ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**A_ )
_UpperCAmelCase = outputs.logits.detach().cpu()
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A_ , target_sizes=[(5_00, 3_00)] )
_UpperCAmelCase = torch.Size((5_00, 3_00) )
self.assertEqual(segmentation[0].shape , A_ )
_UpperCAmelCase = image_processor.post_process_semantic_segmentation(outputs=A_ )
_UpperCAmelCase = torch.Size((1_60, 1_60) )
self.assertEqual(segmentation[0].shape , A_ ) | 715 |
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : int ) ->Tuple:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__UpperCamelCase , Device ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable with either `pickle` or `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(__UpperCamelCase , str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def _snake_case ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__UpperCamelCase ): device for device in jax.devices()}
def _snake_case ( self : Dict , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , list ) and column:
if all(
isinstance(__UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__UpperCamelCase , axis=0 )
return column
def _snake_case ( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , (str, bytes, type(None)) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_x64:
_UpperCAmelCase = {"""dtype""": jnp.int64}
else:
_UpperCAmelCase = {"""dtype""": jnp.int32}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"""dtype""": jnp.float32}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(__UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable
# with either `pickle` or `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__UpperCamelCase , """__array__""" ) and not isinstance(__UpperCamelCase , jax.Array ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : dict ) ->int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
_UpperCAmelCase = self._consolidate(__UpperCamelCase )
return column
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_batch(__UpperCamelCase )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
return batch | 19 | 0 |
"""simple docstring"""
from itertools import product
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = sides_number
_UpperCAmelCase = max_face_number * dice_number
_UpperCAmelCase = [0] * (max_total + 1)
_UpperCAmelCase = 1
_UpperCAmelCase = range(_A , max_face_number + 1 )
for dice_numbers in product(_A , repeat=_A ):
_UpperCAmelCase = sum(_A )
totals_frequencies[total] += 1
return totals_frequencies
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = total_frequency_distribution(
sides_number=4 , dice_number=9 )
_UpperCAmelCase = total_frequency_distribution(
sides_number=6 , dice_number=6 )
_UpperCAmelCase = 0
_UpperCAmelCase = 9
_UpperCAmelCase = 4 * 9
_UpperCAmelCase = 6
for peter_total in range(_A , max_peter_total + 1 ):
peter_wins_count += peter_totals_frequencies[peter_total] * sum(
colin_totals_frequencies[min_colin_total:peter_total] )
_UpperCAmelCase = (4**9) * (6**6)
_UpperCAmelCase = peter_wins_count / total_games_number
_UpperCAmelCase = round(_A , ndigits=7 )
return rounded_peter_win_probability
if __name__ == "__main__":
print(F"{solution() = }") | 716 |
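# Quick inline sanity check of the frequency-counting idea above (written
# standalone because the helper's name is mangled): two 2-sided dice yield
# the totals 2, 3, 3, 4.
from itertools import product as _product

_freqs = [0] * (2 * 2 + 1)
for _rolls in _product(range(1, 3), repeat=2):
    _freqs[sum(_rolls)] += 1
assert _freqs == [0, 0, 1, 2, 1]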
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Tuple = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 19 | 0 |
"""simple docstring"""
def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = int(__lowerCAmelCase )
# Initialize Result
_UpperCAmelCase = []
# Traverse through all denomination
for denomination in reversed(__lowerCAmelCase ):
# Find denominations
while int(__lowerCAmelCase ) >= int(__lowerCAmelCase ):
total_value -= int(__lowerCAmelCase )
answer.append(__lowerCAmelCase ) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
a : int = []
a : Optional[int] = '''0'''
if (
input('''Do you want to enter your denominations ? (yY/n): ''').strip().lower()
== "y"
):
a : Dict = int(input('''Enter the number of denominations you want to add: ''').strip())
for i in range(0, n):
denominations.append(int(input(F"Denomination {i}: ").strip()))
a : Union[str, Any] = input('''Enter the change you want to make in Indian Currency: ''').strip()
else:
# All denominations of Indian Currency if user does not enter
a : Any = [1, 2, 5, 1_0, 2_0, 5_0, 1_0_0, 5_0_0, 2_0_0_0]
a : List[str] = input('''Enter the change you want to make: ''').strip()
if int(value) == 0 or int(value) < 0:
print('''The total value cannot be zero or negative.''')
else:
print(F"Following is minimal change for {value}: ")
a : str = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=''' ''') | 717 |
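# Expected behaviour sketch (hypothetical direct call, since the function name
# above is mangled): greedy change for 74 using the default Indian denominations.
# find_minimum_change([1, 2, 5, 10, 20, 50, 100, 500, 2000], 74)
# -> [50, 20, 2, 2]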
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a : int = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = test_results.split(""" """ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCAmelCase = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_A ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
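# Example of the parsing above (hypothetical call, as the name is mangled):
# handle_test_results("1 failed, 2 passed in 3.2s") -> (1, 2, "3.2s")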
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = None
_UpperCAmelCase = False
for line in failures_short_lines.split("""\n""" ):
if re.search(R"""_ \[doctest\]""" , _A ):
_UpperCAmelCase = True
_UpperCAmelCase = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_UpperCAmelCase = line
_UpperCAmelCase = False
return failures
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = title
_UpperCAmelCase = doc_test_results["""time_spent"""].split(""",""" )[0]
_UpperCAmelCase = doc_test_results["""success"""]
_UpperCAmelCase = doc_test_results["""failures"""]
_UpperCAmelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCAmelCase = doc_test_results
@property
def _snake_case ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self._time_spent]
_UpperCAmelCase = 0
for time in time_spent:
_UpperCAmelCase = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = [0, 0, time_parts[0]]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"""{int(__UpperCamelCase )}h{int(__UpperCamelCase )}m{int(__UpperCamelCase )}s"""
@property
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = 40
_UpperCAmelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(__UpperCamelCase , __UpperCamelCase )}
_UpperCAmelCase = """"""
for category, failures in category_failures.items():
if len(__UpperCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__UpperCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCamelCase )
@staticmethod
def _snake_case ( ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(__UpperCamelCase )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=__UpperCamelCase , )
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_UpperCAmelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
_UpperCAmelCase = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=__UpperCamelCase , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase = """"""
for key, value in failures.items():
_UpperCAmelCase = value[:2_00] + """ [Truncated]""" if len(__UpperCamelCase ) > 2_50 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
_UpperCAmelCase = job_name
_UpperCAmelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_UpperCAmelCase = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_UpperCAmelCase = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
_UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_UpperCAmelCase = f"""*Num failures* :{len(job_result["failed"] )} \n"""
_UpperCAmelCase = job_result["""failures"""]
_UpperCAmelCase = self.get_reply_blocks(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text=__UpperCamelCase )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f"""Results for {job}""" , blocks=__UpperCamelCase , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = os.environ["""GITHUB_RUN_ID"""]
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A ).json()
_UpperCAmelCase = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , _A )
return {}
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {}
if os.path.exists(_A ):
_UpperCAmelCase = os.listdir(_A )
for file in files:
try:
with open(os.path.join(_A , _A ) , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(_A , _A )}.""" ) from e
return _artifact
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
class a_ :
def __init__( self : List[Any] , __UpperCamelCase : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = name
_UpperCAmelCase = []
def __str__( self : int ) ->Optional[Any]:
'''simple docstring'''
return self.name
def _snake_case ( self : Dict , __UpperCamelCase : str ) ->int:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
_UpperCAmelCase = {}
_UpperCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_UpperCAmelCase = directory
if artifact_name not in _available_artifacts:
_UpperCAmelCase = Artifact(_A )
_available_artifacts[artifact_name].add_path(_A )
return _available_artifacts
if __name__ == "__main__":
a : Dict = get_job_links()
a : Dict = retrieve_available_artifacts()
a : Optional[int] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a : Dict = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a : int = github_actions_job_links.get('''run_doctests''')
a : Tuple = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
a : Optional[Any] = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
a , a , a : str = handle_test_results(artifact['''stats'''])
a : Tuple = failed
a : int = success
a : Any = time_spent[1:-1] + ''', '''
a : Dict = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
a : List[Any] = line.replace('''FAILED ''', '''''')
a : Tuple = line.split()[0].replace('''\n''', '''''')
if "::" in line:
a , a : Union[str, Any] = line.split('''::''')
else:
a , a : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a : Optional[Any] = all_failures[test] if test in all_failures else '''N/A'''
a : List[str] = failure
break
a : List[Any] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply() | 19 | 0 |
import math
import numpy as np
import qiskit
from qiskit import Aer, ClassicalRegister, QuantumCircuit, QuantumRegister, execute
def _UpperCamelCase ( lowercase__ = 3 ) -> Optional[Any]:
"""simple docstring"""
if isinstance(lowercase__ , str ):
raise TypeError("""number of qubits must be an integer.""" )
if number_of_qubits <= 0:
raise ValueError("""number of qubits must be > 0.""" )
if math.floor(lowercase__ ) != number_of_qubits:
raise ValueError("""number of qubits must be exact integer.""" )
if number_of_qubits > 1_0:
raise ValueError("""number of qubits too large to simulate(>10).""" )
_UpperCAmelCase = QuantumRegister(lowercase__ , """qr""" )
_UpperCAmelCase = ClassicalRegister(lowercase__ , """cr""" )
_UpperCAmelCase = QuantumCircuit(lowercase__ , lowercase__ )
_UpperCAmelCase = number_of_qubits
for i in range(lowercase__ ):
quantum_circuit.h(number_of_qubits - i - 1 )
counter -= 1
for j in range(lowercase__ ):
quantum_circuit.cp(np.pi / 2 ** (counter - j) , lowercase__ , lowercase__ )
for k in range(number_of_qubits // 2 ):
quantum_circuit.swap(lowercase__ , number_of_qubits - k - 1 )
# measure all the qubits
quantum_circuit.measure(lowercase__ , lowercase__ )
# simulate with 10000 shots
_UpperCAmelCase = Aer.get_backend("""qasm_simulator""" )
_UpperCAmelCase = execute(lowercase__ , lowercase__ , shots=1_0_0_0_0 )
return job.result().get_counts(lowercase__ )
if __name__ == "__main__":
print(
F"Total count for quantum fourier transform state is: \n {quantum_fourier_transform(3)}"
) | 718 |
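# Expected-output note: the circuit above applies the QFT to the |000> input,
# which produces a uniform superposition, so with 10000 shots each of the 8
# basis states should appear roughly 10000 / 8 = 1250 times.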
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase ( _A , _A=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def _UpperCamelCase ( *_A ) -> Dict:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("""test""" ):
for decorator in decorators:
_UpperCAmelCase = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
class a_ ( _UpperCAmelCase ):
pass
class a_ ( _UpperCAmelCase ):
a : Any = 0
a : Optional[Any] = 1
a : int = 2
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
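# Usage sketch for the offline-simulation helper above (hypothetical name
# `offline`, as the definition is mangled):
# with offline(OfflineSimulationMode.CONNECTION_FAILS):
#     with pytest.raises(requests.ConnectionError):
#         requests.get("https://huggingface.co")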
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(_A )
yield
finally:
os.chdir(_A )
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(_A )
if not quiet:
print(_A , _A , file=_A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
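# Typical call pattern for the synchronous wrapper above (hypothetical direct
# name, since the definition is mangled):
# result = execute_subprocess_async(["python", "-c", "print('ok')"])
# assert result.returncode == 0 and "ok" in result.stdout[0]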
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , _A , 0 , re.M )
return int(_A )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta | 19 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
a : Tuple = {'configuration_xglm': ['XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP', 'XGLMConfig']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = ['XGLMTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : str = ['XGLMTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
'XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'XGLMForCausalLM',
'XGLMModel',
'XGLMPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'FlaxXGLMForCausalLM',
'FlaxXGLMModel',
'FlaxXGLMPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
'TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFXGLMForCausalLM',
'TFXGLMModel',
'TFXGLMPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
a : str = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 719 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( _UpperCAmelCase ):
a : List[Any] = ''
a : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Tuple , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[str] = None , **__UpperCamelCase : Any , ) ->Any:
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def _snake_case ( self : List[str] ) ->List[str]:
'''simple docstring'''
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str = "rb" , **__UpperCamelCase : Any , ) ->List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , DatasetInfo ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
_UpperCAmelCase = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _snake_case ( self : int , __UpperCamelCase : int , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=False , **__UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip("""/""" ) )
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip("""/""" ) )
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 19 | 0 |
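# Hypothetical usage sketch (class and method names above are mangled, and the
# fsspec-style calls below are assumptions, not this file's literal API):
# fs = HfLegacyFileSystem(repo_info=dataset_info, token=hf_token)
# fs.ls("")                              # paths rebuilt from repo_info.siblings
# with fs.open("train/data.csv") as f:   # streamed via hf_hub_url + fsspec
#     first_line = f.readline()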
from __future__ import annotations
from typing import Any
class a_ :
def __init__( self : Optional[Any] , __UpperCamelCase : int ) ->None:
'''simple docstring'''
_UpperCAmelCase = num_of_nodes
_UpperCAmelCase = []
_UpperCAmelCase = {}
def _snake_case ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->None:
'''simple docstring'''
self.m_edges.append([u_node, v_node, weight] )
def _snake_case ( self : Tuple , __UpperCamelCase : int ) ->int:
'''simple docstring'''
if self.m_component[u_node] == u_node:
return u_node
return self.find_component(self.m_component[u_node] )
def _snake_case ( self : int , __UpperCamelCase : int ) ->None:
'''simple docstring'''
if self.m_component[u_node] != u_node:
for k in self.m_component:
_UpperCAmelCase = self.find_component(UpperCAmelCase__ )
def _snake_case ( self : Tuple , __UpperCamelCase : list[int] , __UpperCamelCase : int , __UpperCamelCase : int ) ->None:
'''simple docstring'''
if component_size[u_node] <= component_size[v_node]:
_UpperCAmelCase = v_node
component_size[v_node] += component_size[u_node]
self.set_component(UpperCAmelCase__ )
elif component_size[u_node] >= component_size[v_node]:
_UpperCAmelCase = self.find_component(UpperCAmelCase__ )
component_size[u_node] += component_size[v_node]
self.set_component(UpperCAmelCase__ )
def _snake_case ( self : int ) ->None:
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = 0
_UpperCAmelCase = [-1] * self.m_num_of_nodes
# A list of components (initialized to all of the nodes)
for node in range(self.m_num_of_nodes ):
self.m_component.update({node: node} )
component_size.append(1 )
_UpperCAmelCase = self.m_num_of_nodes
while num_of_components > 1:
for edge in self.m_edges:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = edge
_UpperCAmelCase = self.m_component[u]
_UpperCAmelCase = self.m_component[v]
if u_component != v_component:
for component in (u_component, v_component):
if (
minimum_weight_edge[component] == -1
or minimum_weight_edge[component][2] > w
):
_UpperCAmelCase = [u, v, w]
for edge in minimum_weight_edge:
if isinstance(UpperCAmelCase__ , UpperCAmelCase__ ):
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = edge
_UpperCAmelCase = self.m_component[u]
_UpperCAmelCase = self.m_component[v]
if u_component != v_component:
mst_weight += w
self.union(UpperCAmelCase__ , UpperCAmelCase__ , UpperCAmelCase__ )
print(f"""Added edge [{u} - {v}]\nAdded weight: {w}\n""" )
num_of_components -= 1
_UpperCAmelCase = [-1] * self.m_num_of_nodes
print(f"""The total weight of the minimal spanning tree is: {mst_weight}""" )
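# Note on the loop above: each outer pass attaches every component's
# minimum-weight outgoing edge, so the number of components at least halves
# per pass; this is Boruvka's algorithm and it finishes in O(E log V) work.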
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod() | 720 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Any = '''
Calculates how good predictions are given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : List[Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=[1, 10, 1_00] , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple=3.0 ) ->Union[str, Any]:
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase ,_UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
def estimator(_A , _A , _A ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_A , _A ):
_UpperCAmelCase = itertools.repeat(_A , len(_A ) )
else:
assert len(_A ) == len(_A )
_UpperCAmelCase = iter(_A )
return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] ) | 19 | 0 |
import json
from typing import Dict, List, Optional, Tuple, Union
from tokenizers import pre_tokenizers, processors
from ...tokenization_utils_base import AddedToken, BatchEncoding, EncodedInput
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import PaddingStrategy, logging
from .tokenization_led import LEDTokenizer
a : str = logging.get_logger(__name__)
a : str = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt''', '''tokenizer_file''': '''tokenizer.json'''}
a : Union[str, Any] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
a : Dict = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
class a_ ( __a ):
a : int = VOCAB_FILES_NAMES
a : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
a : Optional[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : List[str] = LEDTokenizer
a : List[Any] = ['input_ids', 'attention_mask']
def __init__( self : Optional[Any] , __UpperCamelCase : Dict=None , __UpperCamelCase : str=None , __UpperCamelCase : Any=None , __UpperCamelCase : Union[str, Any]="replace" , __UpperCamelCase : int="<s>" , __UpperCamelCase : List[Any]="</s>" , __UpperCamelCase : int="</s>" , __UpperCamelCase : int="<s>" , __UpperCamelCase : List[Any]="<unk>" , __UpperCamelCase : Optional[int]="<pad>" , __UpperCamelCase : List[str]="<mask>" , __UpperCamelCase : List[Any]=False , __UpperCamelCase : Union[str, Any]=True , **__UpperCamelCase : Any , ) ->List[Any]:
'''simple docstring'''
super().__init__(
A__ , A__ , tokenizer_file=A__ , errors=A__ , bos_token=A__ , eos_token=A__ , sep_token=A__ , cls_token=A__ , unk_token=A__ , pad_token=A__ , mask_token=A__ , add_prefix_space=A__ , trim_offsets=A__ , **A__ , )
_UpperCAmelCase = json.loads(self.backend_tokenizer.pre_tokenizer.__getstate__() )
if pre_tok_state.get("""add_prefix_space""" , A__ ) != add_prefix_space:
_UpperCAmelCase = getattr(A__ , pre_tok_state.pop("""type""" ) )
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = pre_tok_class(**A__ )
_UpperCAmelCase = add_prefix_space
# the pre_tokenizer is already updated in the GPT2TokenizerFast `__init__`
_UpperCAmelCase = """post_processor"""
_UpperCAmelCase = getattr(self.backend_tokenizer , A__ , A__ )
if tokenizer_component_instance:
_UpperCAmelCase = json.loads(tokenizer_component_instance.__getstate__() )
# The lists 'sep' and 'cls' must be cased in tuples for the object `post_processor_class`
if "sep" in state:
_UpperCAmelCase = tuple(state["""sep"""] )
if "cls" in state:
_UpperCAmelCase = tuple(state["""cls"""] )
_UpperCAmelCase = False
if state.get("""add_prefix_space""" , A__ ) != add_prefix_space:
_UpperCAmelCase = add_prefix_space
_UpperCAmelCase = True
if state.get("""trim_offsets""" , A__ ) != trim_offsets:
_UpperCAmelCase = trim_offsets
_UpperCAmelCase = True
if changes_to_apply:
_UpperCAmelCase = getattr(A__ , state.pop("""type""" ) )
_UpperCAmelCase = component_class(**A__ )
setattr(self.backend_tokenizer , A__ , A__ )
@property
# Copied from transformers.models.bart.tokenization_bart_fast.BartTokenizerFast.mask_token with BART->LED
def _snake_case ( self : str ) ->str:
'''simple docstring'''
if self._mask_token is None:
if self.verbose:
logger.error("""Using mask_token, but it is not set yet.""" )
return None
return str(self._mask_token )
@mask_token.setter
def _snake_case ( self : Dict , __UpperCamelCase : Tuple ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = AddedToken(A__ , lstrip=A__ , rstrip=A__ ) if isinstance(A__ , A__ ) else value
_UpperCAmelCase = value
def _snake_case ( self : List[str] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : int ) ->BatchEncoding:
'''simple docstring'''
_UpperCAmelCase = kwargs.get("""is_split_into_words""" , A__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._batch_encode_plus(*A__ , **A__ )
def _snake_case ( self : Any , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Dict ) ->BatchEncoding:
'''simple docstring'''
_UpperCAmelCase = kwargs.get("""is_split_into_words""" , A__ )
if is_split_into_words and not self.add_prefix_space:
raise ValueError(
f"""You need to instantiate {self.__class__.__name__} with add_prefix_space=True """
"""to use it with pretokenized inputs.""" )
return super()._encode_plus(*A__ , **A__ )
def _snake_case ( self : Dict , __UpperCamelCase : Dict , __UpperCamelCase : List[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase = self._tokenizer.model.save(A__ , name=A__ )
return tuple(A__ )
def _snake_case ( self : str , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int]=None ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [self.bos_token_id] + token_ids_a + [self.eos_token_id]
if token_ids_a is None:
return output
return output + [self.eos_token_id] + token_ids_a + [self.eos_token_id]
def _snake_case ( self : List[Any] , __UpperCamelCase : str , __UpperCamelCase : Dict = None ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = [self.sep_token_id]
_UpperCAmelCase = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def _snake_case ( self : Any , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] = None , __UpperCamelCase : Union[str, Any] = PaddingStrategy.DO_NOT_PAD , __UpperCamelCase : List[Any] = None , __UpperCamelCase : str = None , ) ->dict:
'''simple docstring'''
_UpperCAmelCase = super()._pad(
encoded_inputs=A__ , max_length=A__ , padding_strategy=A__ , pad_to_multiple_of=A__ , return_attention_mask=A__ , )
# Load from model defaults
if return_attention_mask is None:
_UpperCAmelCase = """attention_mask""" in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
_UpperCAmelCase = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
_UpperCAmelCase = len(encoded_inputs["""global_attention_mask"""] ) != len(A__ )
if needs_to_be_padded:
_UpperCAmelCase = len(A__ ) - len(encoded_inputs["""global_attention_mask"""] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
_UpperCAmelCase = (
encoded_inputs["""global_attention_mask"""] + [-1] * difference
)
elif self.padding_side == "left":
_UpperCAmelCase = [-1] * difference + encoded_inputs[
"""global_attention_mask"""
]
else:
raise ValueError("""Invalid padding strategy:""" + str(self.padding_side ) )
return encoded_inputs | 721 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 19 | 0 |
"""simple docstring"""
a : str = '''ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'''
def _UpperCamelCase ( _A ) -> bytes:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = F"""a bytes-like object is required, not '{data.__class__.__name__}'"""
raise TypeError(_SCREAMING_SNAKE_CASE )
_UpperCAmelCase = """""".join(bin(_SCREAMING_SNAKE_CASE )[2:].zfill(8 ) for byte in data )
_UpperCAmelCase = len(_SCREAMING_SNAKE_CASE ) % 6 != 0
if padding_needed:
# The padding that will be added later
_UpperCAmelCase = b"""=""" * ((6 - len(_SCREAMING_SNAKE_CASE ) % 6) // 2)
# Append binary_stream with arbitrary binary digits (0's by default) to make its
# length a multiple of 6.
binary_stream += "0" * (6 - len(_SCREAMING_SNAKE_CASE ) % 6)
else:
_UpperCAmelCase = b""""""
# Encode every 6 binary digits to their corresponding Base64 character
return (
"".join(
B64_CHARSET[int(binary_stream[index : index + 6] , 2 )]
for index in range(0 , len(_SCREAMING_SNAKE_CASE ) , 6 ) ).encode()
+ padding
)
def _UpperCamelCase ( _A ) -> bytes:
"""simple docstring"""
if not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ) and not isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
_UpperCAmelCase = (
"""argument should be a bytes-like object or ASCII string, """
F"""not '{encoded_data.__class__.__name__}'"""
)
raise TypeError(_SCREAMING_SNAKE_CASE )
# In case encoded_data is a bytes-like object, make sure it contains only
# ASCII characters so we convert it to a string object
if isinstance(_SCREAMING_SNAKE_CASE , _SCREAMING_SNAKE_CASE ):
try:
_UpperCAmelCase = encoded_data.decode("""utf-8""" )
except UnicodeDecodeError:
raise ValueError("""base64 encoded data should only contain ASCII characters""" )
_UpperCAmelCase = encoded_data.count("""=""" )
# Check if the encoded string contains non base64 characters
if padding:
assert all(
char in B64_CHARSET for char in encoded_data[:-padding] ), "Invalid base64 character(s) found."
else:
assert all(
char in B64_CHARSET for char in encoded_data ), "Invalid base64 character(s) found."
# Check the padding
assert len(_SCREAMING_SNAKE_CASE ) % 4 == 0 and padding < 3, "Incorrect padding"
if padding:
# Remove padding if there is one
_UpperCAmelCase = encoded_data[:-padding]
_UpperCAmelCase = """""".join(
bin(B64_CHARSET.index(_SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )[: -padding * 2]
else:
_UpperCAmelCase = """""".join(
bin(B64_CHARSET.index(_SCREAMING_SNAKE_CASE ) )[2:].zfill(6 ) for char in encoded_data )
_UpperCAmelCase = [
int(binary_stream[index : index + 8] , 2 )
for index in range(0 , len(_SCREAMING_SNAKE_CASE ) , 8 )
]
return bytes(_SCREAMING_SNAKE_CASE )
if __name__ == "__main__":
import doctest
doctest.testmod() | 700 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
main() | 19 | 0 |
"""simple docstring"""
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
ConditionalDetrConfig,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
a : Optional[int] = logging.get_logger(__name__)
# here we list all keys to be renamed (original name on the left, our name on the right)
a : Tuple = []
for i in range(6):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.weight", F"encoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.encoder.layers.{i}.self_attn.out_proj.bias", F"encoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.weight", F"encoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear1.bias", F"encoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.weight", F"encoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.linear2.bias", F"encoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.encoder.layers.{i}.norm1.weight", F"encoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.encoder.layers.{i}.norm1.bias", F"encoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.weight", F"encoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.encoder.layers.{i}.norm2.bias", F"encoder.layers.{i}.final_layer_norm.bias"))
# decoder layers: 2 times output projection, 2 feedforward neural networks and 3 layernorms
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.weight", F"decoder.layers.{i}.self_attn.out_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.self_attn.out_proj.bias", F"decoder.layers.{i}.self_attn.out_proj.bias")
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.weight",
F"decoder.layers.{i}.encoder_attn.out_proj.weight",
)
)
rename_keys.append(
(
F"transformer.decoder.layers.{i}.cross_attn.out_proj.bias",
F"decoder.layers.{i}.encoder_attn.out_proj.bias",
)
)
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.weight", F"decoder.layers.{i}.fc1.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear1.bias", F"decoder.layers.{i}.fc1.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.weight", F"decoder.layers.{i}.fc2.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.linear2.bias", F"decoder.layers.{i}.fc2.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm1.weight", F"decoder.layers.{i}.self_attn_layer_norm.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm1.bias", F"decoder.layers.{i}.self_attn_layer_norm.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.weight", F"decoder.layers.{i}.encoder_attn_layer_norm.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.norm2.bias", F"decoder.layers.{i}.encoder_attn_layer_norm.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.weight", F"decoder.layers.{i}.final_layer_norm.weight"))
rename_keys.append((F"transformer.decoder.layers.{i}.norm3.bias", F"decoder.layers.{i}.final_layer_norm.bias"))
# q, k, v projections in self/cross-attention in decoder for conditional DETR
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.weight", F"decoder.layers.{i}.sa_qcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.weight", F"decoder.layers.{i}.sa_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qpos_proj.weight", F"decoder.layers.{i}.sa_qpos_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kpos_proj.weight", F"decoder.layers.{i}.sa_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.weight", F"decoder.layers.{i}.sa_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.weight", F"decoder.layers.{i}.ca_qcontent_proj.weight")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.weight", f"decoder.layers.{i}.ca_qpos_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.weight", F"decoder.layers.{i}.ca_kcontent_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kpos_proj.weight", F"decoder.layers.{i}.ca_kpos_proj.weight")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.weight", F"decoder.layers.{i}.ca_v_proj.weight"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.weight", F"decoder.layers.{i}.ca_qpos_sine_proj.weight")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_qcontent_proj.bias", F"decoder.layers.{i}.sa_qcontent_proj.bias")
)
rename_keys.append(
(F"transformer.decoder.layers.{i}.sa_kcontent_proj.bias", F"decoder.layers.{i}.sa_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.sa_qpos_proj.bias", F"decoder.layers.{i}.sa_qpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_kpos_proj.bias", F"decoder.layers.{i}.sa_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.sa_v_proj.bias", F"decoder.layers.{i}.sa_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qcontent_proj.bias", F"decoder.layers.{i}.ca_qcontent_proj.bias")
)
# rename_keys.append((f"transformer.decoder.layers.{i}.ca_qpos_proj.bias", f"decoder.layers.{i}.ca_qpos_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_kcontent_proj.bias", F"decoder.layers.{i}.ca_kcontent_proj.bias")
)
rename_keys.append((F"transformer.decoder.layers.{i}.ca_kpos_proj.bias", F"decoder.layers.{i}.ca_kpos_proj.bias"))
rename_keys.append((F"transformer.decoder.layers.{i}.ca_v_proj.bias", F"decoder.layers.{i}.ca_v_proj.bias"))
rename_keys.append(
(F"transformer.decoder.layers.{i}.ca_qpos_sine_proj.bias", F"decoder.layers.{i}.ca_qpos_sine_proj.bias")
)
# convolutional projection + query embeddings + layernorm of decoder + class and bounding box heads
# for conditional DETR, also convert reference point head and query scale MLP
rename_keys.extend(
[
('''input_proj.weight''', '''input_projection.weight'''),
('''input_proj.bias''', '''input_projection.bias'''),
('''query_embed.weight''', '''query_position_embeddings.weight'''),
('''transformer.decoder.norm.weight''', '''decoder.layernorm.weight'''),
('''transformer.decoder.norm.bias''', '''decoder.layernorm.bias'''),
('''class_embed.weight''', '''class_labels_classifier.weight'''),
('''class_embed.bias''', '''class_labels_classifier.bias'''),
('''bbox_embed.layers.0.weight''', '''bbox_predictor.layers.0.weight'''),
('''bbox_embed.layers.0.bias''', '''bbox_predictor.layers.0.bias'''),
('''bbox_embed.layers.1.weight''', '''bbox_predictor.layers.1.weight'''),
('''bbox_embed.layers.1.bias''', '''bbox_predictor.layers.1.bias'''),
('''bbox_embed.layers.2.weight''', '''bbox_predictor.layers.2.weight'''),
('''bbox_embed.layers.2.bias''', '''bbox_predictor.layers.2.bias'''),
('''transformer.decoder.ref_point_head.layers.0.weight''', '''decoder.ref_point_head.layers.0.weight'''),
('''transformer.decoder.ref_point_head.layers.0.bias''', '''decoder.ref_point_head.layers.0.bias'''),
('''transformer.decoder.ref_point_head.layers.1.weight''', '''decoder.ref_point_head.layers.1.weight'''),
('''transformer.decoder.ref_point_head.layers.1.bias''', '''decoder.ref_point_head.layers.1.bias'''),
('''transformer.decoder.query_scale.layers.0.weight''', '''decoder.query_scale.layers.0.weight'''),
('''transformer.decoder.query_scale.layers.0.bias''', '''decoder.query_scale.layers.0.bias'''),
('''transformer.decoder.query_scale.layers.1.weight''', '''decoder.query_scale.layers.1.weight'''),
('''transformer.decoder.query_scale.layers.1.bias''', '''decoder.query_scale.layers.1.bias'''),
('''transformer.decoder.layers.0.ca_qpos_proj.weight''', '''decoder.layers.0.ca_qpos_proj.weight'''),
('''transformer.decoder.layers.0.ca_qpos_proj.bias''', '''decoder.layers.0.ca_qpos_proj.bias'''),
]
)
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = state_dict.pop(lowercase__ )
_UpperCAmelCase = val
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = OrderedDict()
for key, value in state_dict.items():
if "backbone.0.body" in key:
_UpperCAmelCase = key.replace("""backbone.0.body""" , """backbone.conv_encoder.model""" )
_UpperCAmelCase = value
else:
_UpperCAmelCase = value
return new_state_dict
def _UpperCamelCase ( _A , _A=False ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """"""
if is_panoptic:
_UpperCAmelCase = """conditional_detr."""
# first: transformer encoder
for i in range(6 ):
# read in weights + bias of input projection layer (in PyTorch's MultiHeadAttention, this is a single matrix + bias)
_UpperCAmelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_weight""" )
_UpperCAmelCase = state_dict.pop(F"""{prefix}transformer.encoder.layers.{i}.self_attn.in_proj_bias""" )
# next, add query, keys and values (in that order) to the state dict
_UpperCAmelCase = in_proj_weight[:2_5_6, :]
_UpperCAmelCase = in_proj_bias[:2_5_6]
_UpperCAmelCase = in_proj_weight[2_5_6:5_1_2, :]
_UpperCAmelCase = in_proj_bias[2_5_6:5_1_2]
_UpperCAmelCase = in_proj_weight[-2_5_6:, :]
_UpperCAmelCase = in_proj_bias[-2_5_6:]
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
_UpperCAmelCase = """http://images.cocodataset.org/val2017/000000039769.jpg"""
_UpperCAmelCase = Image.open(requests.get(lowercase__ , stream=lowercase__ ).raw )
return im
@torch.no_grad()
def _UpperCamelCase ( _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = ConditionalDetrConfig()
# set backbone and dilation attributes
if "resnet101" in model_name:
_UpperCAmelCase = """resnet101"""
if "dc5" in model_name:
_UpperCAmelCase = True
_UpperCAmelCase = """panoptic""" in model_name
if is_panoptic:
_UpperCAmelCase = 2_5_0
else:
_UpperCAmelCase = 9_1
_UpperCAmelCase = """huggingface/label-files"""
_UpperCAmelCase = """coco-detection-id2label.json"""
_UpperCAmelCase = json.load(open(hf_hub_download(lowercase__ , lowercase__ , repo_type="""dataset""" ) , """r""" ) )
_UpperCAmelCase = {int(lowercase__ ): v for k, v in idalabel.items()}
_UpperCAmelCase = idalabel
_UpperCAmelCase = {v: k for k, v in idalabel.items()}
# load image processor
_UpperCAmelCase = """coco_panoptic""" if is_panoptic else """coco_detection"""
_UpperCAmelCase = ConditionalDetrImageProcessor(format=lowercase__ )
# prepare image
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=lowercase__ , return_tensors="""pt""" )
_UpperCAmelCase = encoding["""pixel_values"""]
logger.info(F"""Converting model {model_name}...""" )
# load original model from torch hub
_UpperCAmelCase = torch.hub.load("""DeppMeng/ConditionalDETR""" , lowercase__ , pretrained=lowercase__ ).eval()
_UpperCAmelCase = conditional_detr.state_dict()
# rename keys
for src, dest in rename_keys:
if is_panoptic:
_UpperCAmelCase = """conditional_detr.""" + src
rename_key(lowercase__ , lowercase__ , lowercase__ )
_UpperCAmelCase = rename_backbone_keys(lowercase__ )
# query, key and value matrices need special treatment
read_in_q_k_v(lowercase__ , is_panoptic=lowercase__ )
# important: we need to prepend a prefix to each of the base model keys as the head models use different attributes for them
_UpperCAmelCase = """conditional_detr.model.""" if is_panoptic else """model."""
for key in state_dict.copy().keys():
if is_panoptic:
if (
key.startswith("""conditional_detr""" )
and not key.startswith("""class_labels_classifier""" )
and not key.startswith("""bbox_predictor""" )
):
_UpperCAmelCase = state_dict.pop(lowercase__ )
_UpperCAmelCase = val
elif "class_labels_classifier" in key or "bbox_predictor" in key:
_UpperCAmelCase = state_dict.pop(lowercase__ )
_UpperCAmelCase = val
elif key.startswith("""bbox_attention""" ) or key.startswith("""mask_head""" ):
continue
else:
_UpperCAmelCase = state_dict.pop(lowercase__ )
_UpperCAmelCase = val
else:
if not key.startswith("""class_labels_classifier""" ) and not key.startswith("""bbox_predictor""" ):
_UpperCAmelCase = state_dict.pop(lowercase__ )
_UpperCAmelCase = val
# finally, create HuggingFace model and load state dict
_UpperCAmelCase = ConditionalDetrForSegmentation(lowercase__ ) if is_panoptic else ConditionalDetrForObjectDetection(lowercase__ )
model.load_state_dict(lowercase__ )
model.eval()
model.push_to_hub(repo_id=lowercase__ , organization="""DepuMeng""" , commit_message="""Add model""" )
# verify our conversion
_UpperCAmelCase = conditional_detr(lowercase__ )
_UpperCAmelCase = model(lowercase__ )
assert torch.allclose(outputs.logits , original_outputs["""pred_logits"""] , atol=1e-4 )
assert torch.allclose(outputs.pred_boxes , original_outputs["""pred_boxes"""] , atol=1e-4 )
if is_panoptic:
assert torch.allclose(outputs.pred_masks , original_outputs["""pred_masks"""] , atol=1e-4 )
# Save model and image processor
logger.info(F"""Saving PyTorch model and image processor to {pytorch_dump_folder_path}...""" )
Path(lowercase__ ).mkdir(exist_ok=lowercase__ )
model.save_pretrained(lowercase__ )
image_processor.save_pretrained(lowercase__ )
if __name__ == "__main__":
a : int = argparse.ArgumentParser()
parser.add_argument(
'''--model_name''',
default='''conditional_detr_resnet50''',
type=str,
help='''Name of the CONDITIONAL_DETR model you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the folder to output PyTorch model.'''
)
a : Dict = parser.parse_args()
convert_conditional_detr_checkpoint(args.model_name, args.pytorch_dump_folder_path) | 701 |
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = requests.get(_A , headers=_A , allow_redirects=_A )
_UpperCAmelCase = result.headers["""Location"""]
_UpperCAmelCase = requests.get(_A , allow_redirects=_A )
_UpperCAmelCase = os.path.join(_A , F"""{artifact_name}.zip""" )
with open(_A , """wb""" ) as fp:
fp.write(response.content )
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
_UpperCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(""": """ )]
_UpperCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase = line[len("""FAILED """ ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
_UpperCAmelCase = line
if len(_A ) != len(_A ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` """
F"""and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
_UpperCAmelCase = None
if job_name and job_links:
_UpperCAmelCase = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_UpperCAmelCase = test.split("""/""" )[2]
else:
_UpperCAmelCase = None
return test
def _UpperCamelCase ( _A , _A=None ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase = [x for x in logs if x[2] is not None]
_UpperCAmelCase = {x[2] for x in logs}
_UpperCAmelCase = {}
for test in tests:
_UpperCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase = {"""count""": n_errors, """errors""": error_counts}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda _A : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """| no. | error | status |"""
_UpperCAmelCase = """|-:|:-|:-|"""
_UpperCAmelCase = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase = reduced_by_error[error]["""count"""]
_UpperCAmelCase = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(_A )
return "\n".join(_A )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """| model | no. of errors | major error | count |"""
_UpperCAmelCase = """|-:|-:|-:|-:|"""
_UpperCAmelCase = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase = reduced_by_model[model]["""count"""]
_UpperCAmelCase ,_UpperCAmelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
_UpperCAmelCase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(_A )
return "\n".join(_A )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
a : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a : Tuple = get_job_links(args.workflow_run_id, token=args.token)
a : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a : List[Any] = k.find(''' / ''')
a : Tuple = k[index + len(''' / ''') :]
a : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a : Optional[int] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a : int = reduce_by_error(errors)
a : str = reduce_by_model(errors)
a : int = make_github_table(reduced_by_error)
a : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa) | 19 | 0 |
"""simple docstring"""
import math
import os
from copy import deepcopy
import datasets
import evaluate
import torch
import transformers
from datasets import load_dataset
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer
from accelerate import Accelerator
from accelerate.test_utils import RegressionDataset, RegressionModel
from accelerate.utils import is_tpu_available, set_seed
a : Dict = 'true'
def _UpperCamelCase ( _A , _A=8_2 , _A=1_6 ) -> Tuple:
set_seed(4_2 )
_UpperCAmelCase = RegressionModel()
_UpperCAmelCase = deepcopy(lowerCAmelCase_ )
_UpperCAmelCase = RegressionDataset(length=lowerCAmelCase_ )
_UpperCAmelCase = DataLoader(lowerCAmelCase_ , batch_size=lowerCAmelCase_ )
model.to(accelerator.device )
_UpperCAmelCase = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
return model, ddp_model, dataloader
def _UpperCamelCase ( _A , _A=False ) -> Union[str, Any]:
_UpperCAmelCase = AutoTokenizer.from_pretrained("""hf-internal-testing/mrpc-bert-base-cased""" )
_UpperCAmelCase = load_dataset("""glue""" , """mrpc""" , split="""validation""" )
def tokenize_function(_A ):
_UpperCAmelCase = tokenizer(examples["""sentence1"""] , examples["""sentence2"""] , truncation=lowerCAmelCase_ , max_length=lowerCAmelCase_ )
return outputs
with accelerator.main_process_first():
_UpperCAmelCase = dataset.map(
lowerCAmelCase_ , batched=lowerCAmelCase_ , remove_columns=["""idx""", """sentence1""", """sentence2"""] , )
_UpperCAmelCase = tokenized_datasets.rename_column("""label""" , """labels""" )
def collate_fn(_A ):
if use_longest:
return tokenizer.pad(lowerCAmelCase_ , padding="""longest""" , return_tensors="""pt""" )
return tokenizer.pad(lowerCAmelCase_ , padding="""max_length""" , max_length=1_2_8 , return_tensors="""pt""" )
return DataLoader(lowerCAmelCase_ , shuffle=lowerCAmelCase_ , collate_fn=lowerCAmelCase_ , batch_size=1_6 )
def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
_UpperCAmelCase = Accelerator(dispatch_batches=lowerCAmelCase_ , split_batches=lowerCAmelCase_ )
_UpperCAmelCase = get_dataloader(lowerCAmelCase_ , not dispatch_batches )
_UpperCAmelCase = AutoModelForSequenceClassification.from_pretrained(
"""hf-internal-testing/mrpc-bert-base-cased""" , return_dict=lowerCAmelCase_ )
_UpperCAmelCase = accelerator.prepare(lowerCAmelCase_ , lowerCAmelCase_ )
return {"ddp": [ddp_model, ddp_dataloader, "cuda:0"], "no": [model, dataloader, accelerator.device]}, accelerator
def _UpperCamelCase ( _A , _A , _A ) -> Tuple:
_UpperCAmelCase = []
for batch in dataloader:
_UpperCAmelCase = batch.values()
with torch.no_grad():
_UpperCAmelCase = model(lowerCAmelCase_ )
_UpperCAmelCase = accelerator.gather_for_metrics((logit, target) )
logits_and_targets.append((logit, target) )
_UpperCAmelCase = [], []
for logit, targ in logits_and_targets:
logits.append(lowerCAmelCase_ )
targs.append(lowerCAmelCase_ )
_UpperCAmelCase = torch.cat(lowerCAmelCase_ ), torch.cat(lowerCAmelCase_ )
return logits, targs
def _UpperCamelCase ( _A , _A=8_2 , _A=False , _A=False , _A=1_6 ) -> str:
_UpperCAmelCase = get_basic_setup(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
_UpperCAmelCase = generate_predictions(lowerCAmelCase_ , lowerCAmelCase_ , lowerCAmelCase_ )
assert (
len(lowerCAmelCase_ ) == num_samples
), F"""Unexpected number of inputs:\n Expected: {num_samples}\n Actual: {len(lowerCAmelCase_ )}"""
def _UpperCamelCase ( _A = False , _A = False ) -> Optional[int]:
_UpperCAmelCase = evaluate.load("""glue""" , """mrpc""" )
_UpperCAmelCase = get_mrpc_setup(lowerCAmelCase_ , lowerCAmelCase_ )
# First do baseline
_UpperCAmelCase = setup['''no''']
model.to(lowerCAmelCase_ )
model.eval()
for batch in dataloader:
batch.to(lowerCAmelCase_ )
with torch.inference_mode():
_UpperCAmelCase = model(**lowerCAmelCase_ )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
metric.add_batch(predictions=lowerCAmelCase_ , references=batch["""labels"""] )
_UpperCAmelCase = metric.compute()
# Then do distributed
_UpperCAmelCase = setup['''ddp''']
model.eval()
for batch in dataloader:
with torch.inference_mode():
_UpperCAmelCase = model(**lowerCAmelCase_ )
_UpperCAmelCase = outputs.logits.argmax(dim=-1 )
_UpperCAmelCase = batch['''labels''']
_UpperCAmelCase = accelerator.gather_for_metrics((preds, references) )
metric.add_batch(predictions=lowerCAmelCase_ , references=lowerCAmelCase_ )
_UpperCAmelCase = metric.compute()
for key in "accuracy f1".split():
assert math.isclose(
baseline[key] , distributed[key] ), F"""Baseline and Distributed are not the same for key {key}:\n\tBaseline: {baseline[key]}\n\tDistributed: {distributed[key]}\n"""
def _UpperCamelCase ( ) -> Tuple:
_UpperCAmelCase = Accelerator(split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ )
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_warning()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# These are a bit slower so they should only be ran on the GPU or TPU
if torch.cuda.is_available() or is_tpu_available():
if accelerator.is_local_main_process:
print("""**Testing gather_for_metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`""" )
test_mrpc(lowerCAmelCase_ , lowerCAmelCase_ )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test torch metrics**""" )
for split_batches in [True, False]:
for dispatch_batches in [True, False]:
_UpperCAmelCase = Accelerator(split_batches=lowerCAmelCase_ , dispatch_batches=lowerCAmelCase_ )
if accelerator.is_local_main_process:
print(F"""With: `split_batches={split_batches}`, `dispatch_batches={dispatch_batches}`, length=99""" )
test_torch_metrics(lowerCAmelCase_ , 9_9 )
accelerator.state._reset_state()
if accelerator.is_local_main_process:
print("""**Test last batch is not dropped when perfectly divisible**""" )
_UpperCAmelCase = Accelerator()
test_torch_metrics(lowerCAmelCase_ , 5_1_2 )
accelerator.state._reset_state()
def _UpperCamelCase ( _A ) -> Any:
main()
if __name__ == "__main__":
main() | 702 |
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def _snake_case ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=None ) ->List[str]:
'''simple docstring'''
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(r"""<s_(.*?)>""" , __UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(rf"""</s_{key}>""" , __UpperCamelCase , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
_UpperCAmelCase = tokens.replace(__UpperCamelCase , """""" )
else:
_UpperCAmelCase = end_token.group()
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __UpperCamelCase , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_UpperCAmelCase = self.tokenajson(__UpperCamelCase , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if value:
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(r"""<sep/>""" ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCamelCase )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
_UpperCAmelCase = tokens[tokens.find(__UpperCamelCase ) + len(__UpperCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if len(__UpperCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
return self.image_processor | 19 | 0 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Any = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
a : Optional[int] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 703 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( _A , _A , _A ) -> float:
"""simple docstring"""
_UpperCAmelCase = x
_UpperCAmelCase = y
for step in range(_A ): # noqa: B007
_UpperCAmelCase = a * a - b * b + x
_UpperCAmelCase = 2 * a * b + y
_UpperCAmelCase = a_new
# divergence happens for all complex number with an absolute value
# greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(_A , 1 , 1 ) )
def _UpperCamelCase ( _A = 8_0_0 , _A = 6_0_0 , _A = -0.6 , _A = 0 , _A = 3.2 , _A = 5_0 , _A = True , ) -> Image.Image:
"""simple docstring"""
_UpperCAmelCase = Image.new("""RGB""" , (image_width, image_height) )
_UpperCAmelCase = img.load()
# loop through the image-coordinates
for image_x in range(_A ):
for image_y in range(_A ):
# determine the figure-coordinates based on the image-coordinates
_UpperCAmelCase = figure_width / image_width * image_height
_UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
_UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
_UpperCAmelCase = get_distance(_A , _A , _A )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_UpperCAmelCase = get_color_coded_rgb(_A )
else:
_UpperCAmelCase = get_black_and_white_rgb(_A )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
img.show() | 19 | 0 |
"""simple docstring"""
from collections import deque
class Process:
    def __init__(self, process_name: str, arrival_time: int, burst_time: int) -> None:
        self.process_name = process_name  # process name
        self.arrival_time = arrival_time  # arrival time of the process
        # completion time of the finished process, or the last interrupted time
        self.stop_time = arrival_time
        self.burst_time = burst_time  # remaining burst time
        self.waiting_time = 0  # total time the process waits in the ready queue
        self.turnaround_time = 0  # time from arrival time to completion time
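
# For example, Process("P1", 0, 53) arrives at t = 0 and needs 53 units of CPU
# time; waiting_time and turnaround_time are filled in by the scheduler below.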
class MLFQ:
    """Multi-level feedback queue scheduler: the round robin algorithm runs on
    every queue except the last, which is served first come, first served."""

    def __init__(
        self,
        number_of_queues: int,
        time_slices: list[int],
        queue: deque[Process],
        current_time: int,
    ) -> None:
        self.number_of_queues = number_of_queues
        # time slices of the queues the round robin algorithm is applied to
        self.time_slices = time_slices
        # unfinished processes wait in this ready queue
        self.ready_queue = queue
        # current time
        self.current_time = current_time
        # finished processes are collected here, in completion order
        self.finish_queue: deque[Process] = deque()
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(self.finish_queue ) ):
sequence.append(self.finish_queue[i].process_name )
return sequence
def _snake_case ( self : List[str] , __UpperCamelCase : list[Process] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(__snake_case ) ):
waiting_times.append(queue[i].waiting_time )
return waiting_times
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : list[Process] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(__snake_case ) ):
turnaround_times.append(queue[i].turnaround_time )
return turnaround_times
def _snake_case ( self : Any , __UpperCamelCase : list[Process] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = []
for i in range(len(__snake_case ) ):
completion_times.append(queue[i].stop_time )
return completion_times
    def calculate_remaining_burst_time_of_processes(self, queue: deque[Process]) -> list[int]:
        """Return the remaining burst time of each process in `queue`."""
        return [q.burst_time for q in queue]

    def update_waiting_time(self, process: Process) -> int:
        """Accumulate the time `process` has spent waiting since it last ran."""
        process.waiting_time += self.current_time - process.stop_time
        return process.waiting_time
    def first_come_first_served(self, ready_queue: deque[Process]) -> deque[Process]:
        finished = deque()  # sequence deque of finished process
        while len(ready_queue) != 0:
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of current process
            self.update_waiting_time(cp)
            # update current time
            self.current_time += cp.burst_time
            # finish the process and set the process's burst-time 0
            cp.burst_time = 0
            # set the process's turnaround time because it is finished
            cp.turnaround_time = self.current_time - cp.arrival_time
            # set the completion time
            cp.stop_time = self.current_time
            # add the process to queue that has finished queue
            finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # FCFS will finish all remaining processes
        return finished
    def round_robin(
        self, ready_queue: deque[Process], time_slice: int
    ) -> tuple[deque[Process], deque[Process]]:
        finished = deque()  # sequence deque of terminated process
        # just for 1 cycle and unfinished processes will go back to queue
        for _ in range(len(ready_queue)):
            cp = ready_queue.popleft()  # current process

            # if process's arrival time is later than current time, update current time
            if self.current_time < cp.arrival_time:
                self.current_time += cp.arrival_time

            # update waiting time of unfinished processes
            self.update_waiting_time(cp)
            # if the burst time of process is bigger than time-slice
            if cp.burst_time > time_slice:
                # use CPU for only time-slice
                self.current_time += time_slice
                # update remaining burst time
                cp.burst_time -= time_slice
                # update end point time
                cp.stop_time = self.current_time
                # locate the process behind the queue because it is not finished
                ready_queue.append(cp)
            else:
                # use CPU for remaining burst time
                self.current_time += cp.burst_time
                # set burst time 0 because the process is finished
                cp.burst_time = 0
                # set the finish time
                cp.stop_time = self.current_time
                # update the process' turnaround time because it is finished
                cp.turnaround_time = self.current_time - cp.arrival_time
                # add the process to queue that has finished queue
                finished.append(cp)

        self.finish_queue.extend(finished)  # add finished process to finish queue
        # return finished processes queue and remaining processes queue
        return finished, ready_queue
    def multi_level_feedback_queue(self) -> deque[Process]:
        # all queues except the last one use the round robin algorithm
        for i in range(self.number_of_queues - 1):
            finished, self.ready_queue = self.round_robin(
                self.ready_queue, self.time_slices[i]
            )
        # the last queue has first_come_first_served algorithm
        self.first_come_first_served(self.ready_queue)

        return self.finish_queue
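# Worked sketch of the demo below: with burst times [53, 17, 68, 24] and time
# slices [17, 25], queue 0 gives every process up to 17 time units, queue 1 up
# to 25 more, and the final FCFS stage runs each remaining process to
# completion.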
if __name__ == "__main__":
import doctest
a : Dict = Process('''P1''', 0, 5_3)
a : Any = Process('''P2''', 0, 1_7)
a : Any = Process('''P3''', 0, 6_8)
a : Union[str, Any] = Process('''P4''', 0, 2_4)
a : Optional[int] = 3
a : Dict = [1_7, 2_5]
a : Optional[Any] = deque([Pa, Pa, Pa, Pa])
if len(time_slices) != number_of_queues - 1:
raise SystemExit(0)
doctest.testmod(extraglobs={'''queue''': deque([Pa, Pa, Pa, Pa])})
a : str = Process('''P1''', 0, 5_3)
a : Optional[Any] = Process('''P2''', 0, 1_7)
a : List[str] = Process('''P3''', 0, 6_8)
a : Optional[int] = Process('''P4''', 0, 2_4)
a : Dict = 3
a : List[Any] = [1_7, 2_5]
a : Union[str, Any] = deque([Pa, Pa, Pa, Pa])
a : Optional[int] = MLFQ(number_of_queues, time_slices, queue, 0)
a : List[str] = mlfq.multi_level_feedback_queue()
# print total waiting times of processes(P1, P2, P3, P4)
print(
F"waiting time:\
\t\t\t{MLFQ.calculate_waiting_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print completion times of processes(P1, P2, P3, P4)
print(
F"completion time:\
\t\t{MLFQ.calculate_completion_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print total turnaround times of processes(P1, P2, P3, P4)
print(
F"turnaround time:\
\t\t{MLFQ.calculate_turnaround_time(mlfq, [Pa, Pa, Pa, Pa])}"
)
# print sequence of finished processes
print(
F"sequence of finished processes:\
{mlfq.calculate_sequence_of_finish_queue()}"
) | 704 |
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput
class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]
    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
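# Conditioning layout this module assumes (the split used by pipelines such as
# Versatile Diffusion): `encoder_hidden_states` concatenates a 77-token text
# condition with a 257-token image condition along the sequence axis.
# Illustrative shapes, with hypothetical tensors:
#   text_cond:  (batch, 77, num_features)
#   image_cond: (batch, 257, num_features)
#   encoder_hidden_states = torch.cat([text_cond, image_cond], dim=1)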
"""simple docstring"""
from argparse import ArgumentParser, Namespace
from ..utils import logging
from . import BaseTransformersCLICommand
def convert_command_factory(args: Namespace):
    """
    Factory function used to convert a model TF 1.0 checkpoint in a PyTorch checkpoint.

    Returns: ConvertCommand
    """
    return ConvertCommand(
        args.model_type, args.tf_checkpoint, args.pytorch_dump_output, args.config, args.finetuning_task_name
    )
IMPORT_ERROR_MESSAGE = """
transformers can only be used from the commandline to convert TensorFlow models to PyTorch. In that case, it requires
TensorFlow to be installed. Please see https://www.tensorflow.org/install/ for installation instructions.
"""
class ConvertCommand(BaseTransformersCLICommand):
    @staticmethod
    def register_subcommand(parser: ArgumentParser):
        """
        Register this command to argparse so it's available for the transformers-cli.
        """
        train_parser = parser.add_parser(
            "convert",
            help="CLI tool to convert original author checkpoints to Transformers PyTorch checkpoints.",
        )
        train_parser.add_argument("--model_type", type=str, required=True, help="Model's type.")
        train_parser.add_argument(
            "--tf_checkpoint", type=str, required=True, help="TensorFlow checkpoint path or folder."
        )
        train_parser.add_argument(
            "--pytorch_dump_output", type=str, required=True, help="Path to the PyTorch saved model output."
        )
        train_parser.add_argument("--config", type=str, default="", help="Configuration file path or folder.")
        train_parser.add_argument(
            "--finetuning_task_name",
            type=str,
            default=None,
            help="Optional fine-tuning task name if the TF model was a finetuned model.",
        )
        train_parser.set_defaults(func=convert_command_factory)
    def __init__(
        self,
        model_type: str,
        tf_checkpoint: str,
        pytorch_dump_output: str,
        config: str,
        finetuning_task_name: str,
        *args,
    ):
        self._logger = logging.get_logger("transformers-cli/converting")

        self._logger.info(f"Loading model {model_type}")
        self._model_type = model_type
        self._tf_checkpoint = tf_checkpoint
        self._pytorch_dump_output = pytorch_dump_output
        self._config = config
        self._finetuning_task_name = finetuning_task_name
    def run(self):
        if self._model_type == "albert":
            try:
                from ..models.albert.convert_albert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "bert":
            try:
                from ..models.bert.convert_bert_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "funnel":
            try:
                from ..models.funnel.convert_funnel_original_tf_checkpoint_to_pytorch import (
                    convert_tf_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "t5":
            try:
                from ..models.t5.convert_t5_original_tf_checkpoint_to_pytorch import convert_tf_checkpoint_to_pytorch
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "gpt":
            from ..models.openai.convert_openai_original_tf_checkpoint_to_pytorch import (
                convert_openai_checkpoint_to_pytorch,
            )

            convert_openai_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "transfo_xl":
            try:
                from ..models.transfo_xl.convert_transfo_xl_original_tf_checkpoint_to_pytorch import (
                    convert_transfo_xl_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            if "ckpt" in self._tf_checkpoint.lower():
                tf_checkpoint = self._tf_checkpoint
                tf_dataset_file = ""
            else:
                tf_dataset_file = self._tf_checkpoint
                tf_checkpoint = ""
            convert_transfo_xl_checkpoint_to_pytorch(
                tf_checkpoint, self._config, self._pytorch_dump_output, tf_dataset_file
            )
        elif self._model_type == "gpt2":
            try:
                from ..models.gpt2.convert_gpt2_original_tf_checkpoint_to_pytorch import (
                    convert_gpt2_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_gpt2_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        elif self._model_type == "xlnet":
            try:
                from ..models.xlnet.convert_xlnet_original_tf_checkpoint_to_pytorch import (
                    convert_xlnet_checkpoint_to_pytorch,
                )
            except ImportError:
                raise ImportError(IMPORT_ERROR_MESSAGE)

            convert_xlnet_checkpoint_to_pytorch(
                self._tf_checkpoint, self._config, self._pytorch_dump_output, self._finetuning_task_name
            )
        elif self._model_type == "xlm":
            from ..models.xlm.convert_xlm_original_pytorch_checkpoint_to_pytorch import (
                convert_xlm_checkpoint_to_pytorch,
            )

            convert_xlm_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "lxmert":
            from ..models.lxmert.convert_lxmert_original_tf_checkpoint_to_pytorch import (
                convert_lxmert_checkpoint_to_pytorch,
            )

            convert_lxmert_checkpoint_to_pytorch(self._tf_checkpoint, self._pytorch_dump_output)
        elif self._model_type == "rembert":
            from ..models.rembert.convert_rembert_tf_checkpoint_to_pytorch import (
                convert_rembert_tf_checkpoint_to_pytorch,
            )

            convert_rembert_tf_checkpoint_to_pytorch(self._tf_checkpoint, self._config, self._pytorch_dump_output)
        else:
            raise ValueError(
                "--model_type should be selected in the list [albert, bert, funnel, gpt, gpt2, lxmert, rembert, t5,"
                " transfo_xl, xlm, xlnet]"
            )
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
def solution(limit: int = 1_000_000) -> int:
    """
    Counts the reduced proper fractions with denominator <= limit, i.e. the
    sum of Euler's totient phi(n) for 2 <= n <= limit.
    """
    primes = set(range(3, limit, 2))
    primes.add(2)
    for p in range(3, limit, 2):
        if p not in primes:
            continue
        primes.difference_update(set(range(p * p, limit, p)))

    phi = [float(n) for n in range(limit + 1)]

    for p in primes:
        for n in range(p, limit + 1, p):
            phi[n] *= 1 - 1 / p

    return int(sum(phi[2:]))
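# Quick check at a small limit: solution(8) == 21, matching the 21 reduced
# proper fractions with denominator <= 8.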
if __name__ == "__main__":
print(F"{solution() = }") | 706 |
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname: str, version: str, pattern: str) -> None:
    """Update the version in one file using a registered pattern."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
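# Illustrative call (hypothetical version string): rewrite the version
# declared in setup.py to 0.19.0 using the "setup" pattern above.
#   update_version_in_file("setup.py", "0.19.0", pattern="setup")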
def update_version_in_examples(version: str) -> None:
    """Update the version pinned in every example script."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version: str, patch: bool = False) -> None:
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list() -> None:
    """Replace links to the main docs with stable-doc links in the README's model list."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version() -> packaging.version.Version:
    """Read the current version in the main __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch: bool = False) -> None:
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work() -> None:
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work() | 19 | 0 |
"""simple docstring"""
import math
import tensorflow as tf
from packaging import version
def _gelu(x):
    """
    Gaussian Error Linear Unit, as originally implemented in the Google BERT repo.
    """
    x = tf.convert_to_tensor(x)
    cdf = 0.5 * (1.0 + tf.math.erf(x / tf.cast(tf.sqrt(2.0), x.dtype)))

    return x * cdf


def _gelu_new(x):
    """
    Smoother, tanh-based approximation of the GELU.
    """
    x = tf.convert_to_tensor(x)
    pi = tf.cast(math.pi, x.dtype)
    coeff = tf.cast(0.044715, x.dtype)
    cdf = 0.5 * (1.0 + tf.tanh(tf.sqrt(2.0 / pi) * (x + coeff * tf.pow(x, 3))))

    return x * cdf


def mish(x):
    x = tf.convert_to_tensor(x)

    return x * tf.tanh(tf.math.softplus(x))


def gelu_fast(x):
    x = tf.convert_to_tensor(x)
    coeff1 = tf.cast(0.044715, x.dtype)
    coeff2 = tf.cast(0.7978845608, x.dtype)

    return 0.5 * x * (1.0 + tf.tanh(x * coeff2 * (1.0 + coeff1 * x * x)))


def quick_gelu(x):
    x = tf.convert_to_tensor(x)
    coeff = tf.cast(1.702, x.dtype)

    return x * tf.math.sigmoid(coeff * x)


def gelu_10(x):
    """
    Clip the range of possible GeLU outputs to [-10, 10].
    """
    return tf.clip_by_value(_gelu(x), -10, 10)


def glu(x, axis=-1):
    """
    Gated Linear Unit: split the input along `axis` into halves a and b, and return a * sigmoid(b).
    """
    a, b = tf.split(x, 2, axis=axis)

    return a * tf.math.sigmoid(b)
if version.parse(tf.version.VERSION) >= version.parse("2.4"):

    def approximate_gelu_wrap(x):
        return tf.keras.activations.gelu(x, approximate=True)

    gelu = tf.keras.activations.gelu
    gelu_new = approximate_gelu_wrap
else:
    gelu = _gelu
    gelu_new = _gelu_new
ACT2FN = {
    "gelu": gelu,
    "gelu_10": gelu_10,
    "gelu_fast": gelu_fast,
    "gelu_new": gelu_new,
    "glu": glu,
    "mish": mish,
    "quick_gelu": quick_gelu,
    "relu": tf.keras.activations.relu,
    "sigmoid": tf.keras.activations.sigmoid,
    "silu": tf.keras.activations.swish,
    "swish": tf.keras.activations.swish,
    "tanh": tf.keras.activations.tanh,
}


def get_tf_activation(activation_string):
    if activation_string in ACT2FN:
        return ACT2FN[activation_string]
    else:
        raise KeyError(f"function {activation_string} not found in ACT2FN mapping {list(ACT2FN.keys())}")
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """
    Creates a state space tree and iterates through each branch using DFS, printing a
    permutation once the full length of the sequence has been used.
    """
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False
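# For a sequence of length n this backtracking tree prints all n! orderings;
# e.g. ["A", "B"] would print ['A', 'B'] and then ['B', 'A'].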
sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_2: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_2)
"""simple docstring"""
import argparse
import json
import os
import time
import zipfile
from get_ci_error_statistics import download_artifact, get_artifacts_links
from transformers import logging
a : Optional[Any] = logging.get_logger(__name__)
def extract_warnings_from_single_artifact(artifact_path, targets):
    """Extract warnings from a downloaded artifact (in .zip format)."""
    selected_warnings = set()
    buffer = []

    def parse_line(fp):
        for line in fp:
            if isinstance(line, bytes):
                line = line.decode("UTF-8")
            if "warnings summary (final)" in line:
                continue
            # This means we are outside the body of a warning
            elif not line.startswith(" "):
                # process a single warning and move it to `selected_warnings`.
                if len(buffer) > 0:
                    warning = "\n".join(buffer)
                    # Only keep the warnings specified in `targets`
                    if any(f": {x}: " in warning for x in targets):
                        selected_warnings.add(warning)
                    buffer.clear()
                continue
            else:
                line = line.strip()
                buffer.append(line)

    # `from_gh` is a module-level flag set in the `__main__` block below.
    if from_gh:
        for filename in os.listdir(artifact_path):
            file_path = os.path.join(artifact_path, filename)
            if not os.path.isdir(file_path):
                # read the file
                if filename != "warnings.txt":
                    continue
                with open(file_path) as fp:
                    parse_line(fp)
    else:
        try:
            with zipfile.ZipFile(artifact_path) as z:
                for filename in z.namelist():
                    if not os.path.isdir(filename):
                        # read the file
                        if filename != "warnings.txt":
                            continue
                        with z.open(filename) as fp:
                            parse_line(fp)
        except Exception:
            logger.warning(
                f"{artifact_path} is either an invalid zip file or something else wrong. This file is skipped."
            )

    return selected_warnings
def extract_warnings(artifact_dir, targets):
    """Extract warnings from all artifact files."""
    selected_warnings = set()

    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if (p.endswith(".zip") or from_gh)]
    for p in paths:
        selected_warnings.update(extract_warnings_from_single_artifact(p, targets))

    return selected_warnings
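# Usage sketch (hypothetical directory layout): scan downloaded artifact zips
# for the selected warning categories.
#   found = extract_warnings("./artifacts", ["DeprecationWarning", "FutureWarning"])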
if __name__ == "__main__":
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
return values.split(""",""" )
a : Union[str, Any] = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
# optional parameters
parser.add_argument(
'''--targets''',
default='''DeprecationWarning,UserWarning,FutureWarning''',
type=list_str,
help='''Comma-separated list of target warning(s) which we want to extract.''',
)
parser.add_argument(
'''--from_gh''',
action='''store_true''',
help='''If running from a GitHub action workflow and collecting warnings from its artifacts.''',
)
    args = parser.parse_args()

    from_gh = args.from_gh
    if from_gh:
        # The artifacts have to be downloaded using `actions/download-artifact@v3`
        pass
    else:
        os.makedirs(args.output_dir, exist_ok=True)

        # get download links
        artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
        with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
            json.dump(artifacts, fp, ensure_ascii=False, indent=4)

        # download artifacts
        for idx, (name, url) in enumerate(artifacts.items()):
            print(name)
            print(url)
            print("=" * 80)
            download_artifact(name, url, args.output_dir, args.token)
            # Be gentle to GitHub
            time.sleep(1)

    # extract warnings from artifacts
    selected_warnings = extract_warnings(args.output_dir, args.targets)
    selected_warnings = sorted(selected_warnings)

    with open(os.path.join(args.output_dir, "selected_warnings.json"), "w", encoding="UTF-8") as fp:
        json.dump(selected_warnings, fp, ensure_ascii=False, indent=4)
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(
        self,
        parent,
        batch_size=2,
        image_size=32,
        patch_size=16,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=4,
        backbone_out_indices=[0, 1, 2, 3],
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        initializer_range=0.02,
        num_labels=3,
        backbone_featmap_shape=[1, 384, 24, 24],
        is_hybrid=True,
        scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        num_patches = (image_size // patch_size) ** 2
        self.seq_length = num_patches + 1
    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels
    def get_config(self):
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }

        return DPTConfig(
            image_size=self.image_size,
            patch_size=self.patch_size,
            num_channels=self.num_channels,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            backbone_out_indices=self.backbone_out_indices,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            is_decoder=False,
            initializer_range=self.initializer_range,
            is_hybrid=self.is_hybrid,
            backbone_config=backbone_config,
            backbone_featmap_shape=self.backbone_featmap_shape,
        )
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size)
        )

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]

            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)
    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue

            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()
    def test_initialization(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(),
                        [0.0, 1.0],
                        msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                    )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = """add"""
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
# We will verify our results on an image of cute cats
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image
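# The integration test below runs the hybrid DPT checkpoint on the COCO sample
# above and compares a 3x3 corner of the (1, 384, 384) depth map, scaled by
# 1/100, against reference values.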
@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]
        ).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4))
"""simple docstring"""
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
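# Because SORTED_HANDS is ordered weakest to strongest, index order alone
# fixes the expected outcome: (play >= oppo) + (play > oppo) maps to
# 0 -> "Loss", 1 -> "Tie", 2 -> "Win".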
def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))
@pytest.mark.parametrize("""hand, expected""" , snake_case_ )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
assert PokerHand(snake_case_ )._is_flush() == expected
@pytest.mark.parametrize("""hand, expected""" , snake_case_ )
def _UpperCamelCase ( _A , _A ) -> int:
"""simple docstring"""
assert PokerHand(snake_case_ )._is_straight() == expected
@pytest.mark.parametrize("""hand, expected, card_values""" , snake_case_ )
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = PokerHand(snake_case_ )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize("""hand, expected""" , snake_case_ )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
assert PokerHand(snake_case_ )._is_same_kind() == expected
@pytest.mark.parametrize("""hand, expected""" , snake_case_ )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
assert PokerHand(snake_case_ )._hand_type == expected
@pytest.mark.parametrize("""hand, other, expected""" , snake_case_ )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
assert PokerHand(snake_case_ ).compare_with(PokerHand(snake_case_ ) ) == expected
@pytest.mark.parametrize("""hand, other, expected""" , generate_random_hands() )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
assert PokerHand(snake_case_ ).compare_with(PokerHand(snake_case_ ) ) == expected
def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # Five-high straights should sort below six-high straights.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls must keep returning True without mutating the card values.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem number 54 from Project Euler: testing from the poker_hands.txt file.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : List[str] = logging.get_logger(__name__)
class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1
@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
        )
    def _sanitize_parameters(
        self,
        return_tensors=None,
        return_text=None,
        return_type=None,
        clean_up_tokenization_spaces=None,
        truncation=None,
        stop_sequence=None,
        **generate_kwargs,
    ):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim."
                )
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params
    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f" `args[0]`: {args[0]} have the wrong format. They should be either of type `str` or type `list`"
            )
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs
    def __call__(self, *args, **kwargs):
        """
        Generate the output text(s) using text(s) given as inputs.
        """
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result
    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs
    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}
    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
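# Usage sketch (assumes the high-level `pipeline` factory and a seq2seq
# checkpoint such as `t5-small` are available; names are illustrative):
#   from transformers import pipeline
#   generator = pipeline("text2text-generation", model="t5-small")
#   generator("translate English to German: How old are you?")
#   # -> [{"generated_text": "..."}]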
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        """
        Checks whether there might be something wrong with given input with regard to the model.
        """
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})"
            )
@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    # Used in the return key of the pipeline.
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)"
            )
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang
            )
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)
"""simple docstring"""
import inspect
import unittest
from transformers import YolosConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import YolosForObjectDetection, YolosModel
from transformers.models.yolos.modeling_yolos import YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import AutoImageProcessor
class YolosModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        image_size=[30, 30],
        patch_size=2,
        num_channels=3,
        is_training=True,
        use_labels=True,
        hidden_size=32,
        num_hidden_layers=5,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        type_sequence_label_size=10,
        initializer_range=0.02,
        num_labels=3,
        scope=None,
        n_targets=8,
        num_detection_tokens=10,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.scope = scope
        self.n_targets = n_targets
        self.num_detection_tokens = num_detection_tokens
        # we set the expected sequence length (which is used in several tests)
        # expected sequence length = num_patches + 1 (we add 1 for the [CLS] token) + num_detection_tokens
        num_patches = (image_size[1] // patch_size) * (image_size[0] // patch_size)
        self.expected_seq_len = num_patches + 1 + self.num_detection_tokens
def _snake_case ( self : Optional[int] ) ->List[str]:
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size[0], self.image_size[1]] )
_UpperCAmelCase = None
if self.use_labels:
# labels is a list of Dict (each Dict being the labels for a given example in the batch)
_UpperCAmelCase = []
for i in range(self.batch_size ):
_UpperCAmelCase = {}
_UpperCAmelCase = torch.randint(
high=self.num_labels , size=(self.n_targets,) , device=UpperCamelCase__ )
_UpperCAmelCase = torch.rand(self.n_targets , 4 , device=UpperCamelCase__ )
labels.append(UpperCamelCase__ )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
return YolosConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=UpperCamelCase__ , initializer_range=self.initializer_range , num_detection_tokens=self.num_detection_tokens , num_labels=self.num_labels , )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int] , __UpperCamelCase : int ) ->Any:
_UpperCAmelCase = YolosModel(config=UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase = model(UpperCamelCase__ )
self.parent.assertEqual(
result.last_hidden_state.shape , (self.batch_size, self.expected_seq_len, self.hidden_size) )
def _snake_case ( self : Any , __UpperCamelCase : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : int ) ->Optional[int]:
_UpperCAmelCase = YolosForObjectDetection(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
_UpperCAmelCase = model(pixel_values=UpperCamelCase__ )
_UpperCAmelCase = model(UpperCamelCase__ )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
_UpperCAmelCase = model(pixel_values=UpperCamelCase__ , labels=UpperCamelCase__ )
self.parent.assertEqual(result.loss.shape , () )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_detection_tokens, self.num_labels + 1) )
self.parent.assertEqual(result.pred_boxes.shape , (self.batch_size, self.num_detection_tokens, 4) )
def _snake_case ( self : Dict ) ->Union[str, Any]:
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {'''pixel_values''': pixel_values}
return config, inputs_dict
@require_torch
class a_ ( SCREAMING_SNAKE_CASE_ , SCREAMING_SNAKE_CASE_ , unittest.TestCase ):
a : Dict = (YolosModel, YolosForObjectDetection) if is_torch_available() else ()
a : str = (
{'feature-extraction': YolosModel, 'object-detection': YolosForObjectDetection} if is_torch_available() else {}
)
a : Optional[Any] = False
a : Tuple = False
a : Optional[Any] = False
a : Union[str, Any] = False
def _snake_case ( self : Optional[Any] , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : Dict=False ) ->Optional[int]:
_UpperCAmelCase = super()._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ , return_labels=UpperCamelCase__ )
if return_labels:
if model_class.__name__ == "YolosForObjectDetection":
_UpperCAmelCase = []
for i in range(self.model_tester.batch_size ):
_UpperCAmelCase = {}
_UpperCAmelCase = torch.ones(
size=(self.model_tester.n_targets,) , device=UpperCamelCase__ , dtype=torch.long )
_UpperCAmelCase = torch.ones(
self.model_tester.n_targets , 4 , device=UpperCamelCase__ , dtype=torch.float )
labels.append(UpperCamelCase__ )
_UpperCAmelCase = labels
return inputs_dict
def _snake_case ( self : Dict ) ->Tuple:
_UpperCAmelCase = YolosModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=UpperCamelCase__ , has_text_modality=UpperCamelCase__ , hidden_size=37 )
def _snake_case ( self : Optional[Any] ) ->List[Any]:
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[Any] ) ->Tuple:
# YOLOS does not use inputs_embeds
pass
def _snake_case ( self : Any ) ->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCamelCase__ )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(UpperCamelCase__ , nn.Linear ) )
def _snake_case ( self : int ) ->Dict:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(UpperCamelCase__ )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ['''pixel_values''']
self.assertListEqual(arg_names[:1] , UpperCamelCase__ )
def _snake_case ( self : Tuple ) ->List[str]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*UpperCamelCase__ )
def _snake_case ( self : Dict ) ->Optional[Any]:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
# in YOLOS, the seq_len is different
_UpperCAmelCase = self.model_tester.expected_seq_len
for model_class in self.all_model_classes:
_UpperCAmelCase = True
_UpperCAmelCase = False
_UpperCAmelCase = True
_UpperCAmelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
# check that output_attentions also work using config
del inputs_dict["output_attentions"]
_UpperCAmelCase = True
_UpperCAmelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
_UpperCAmelCase = len(UpperCamelCase__ )
# Check attention is always last and order is fine
_UpperCAmelCase = True
_UpperCAmelCase = True
_UpperCAmelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
_UpperCAmelCase = 1
self.assertEqual(out_len + added_hidden_states , len(UpperCamelCase__ ) )
_UpperCAmelCase = outputs.attentions
self.assertEqual(len(UpperCamelCase__ ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(self_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads, seq_len, seq_len] , )
def _snake_case ( self : List[str] ) ->str:
def check_hidden_states_output(__UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : List[str] ):
_UpperCAmelCase = model_class(UpperCamelCase__ )
model.to(UpperCamelCase__ )
model.eval()
with torch.no_grad():
_UpperCAmelCase = model(**self._prepare_for_class(UpperCamelCase__ , UpperCamelCase__ ) )
_UpperCAmelCase = outputs.hidden_states
_UpperCAmelCase = getattr(
self.model_tester , """expected_num_hidden_layers""" , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(UpperCamelCase__ ) , UpperCamelCase__ )
# YOLOS has a different seq_length
_UpperCAmelCase = self.model_tester.expected_seq_len
self.assertListEqual(
list(hidden_states[0].shape[-2:] ) , [seq_length, self.model_tester.hidden_size] , )
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
# check that output_hidden_states also work using config
del inputs_dict["output_hidden_states"]
_UpperCAmelCase = True
check_hidden_states_output(UpperCamelCase__ , UpperCamelCase__ , UpperCamelCase__ )
def _snake_case ( self : Optional[int] ) ->Tuple:
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_object_detection(*UpperCamelCase__ )
@slow
def _snake_case ( self : Union[str, Any] ) ->Any:
for model_name in YOLOS_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
_UpperCAmelCase = YolosModel.from_pretrained(UpperCamelCase__ )
self.assertIsNotNone(UpperCamelCase__ )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
@cached_property
def _snake_case ( self : List[str] ) ->str:
return AutoImageProcessor.from_pretrained("""hustvl/yolos-small""" ) if is_vision_available() else None
@slow
def _snake_case ( self : Any ) ->List[Any]:
_UpperCAmelCase = YolosForObjectDetection.from_pretrained("""hustvl/yolos-small""" ).to(UpperCamelCase__ )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=UpperCamelCase__ , return_tensors="""pt""" ).to(UpperCamelCase__ )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(inputs.pixel_values )
# verify outputs
_UpperCAmelCase = torch.Size((1, 1_00, 92) )
self.assertEqual(outputs.logits.shape , UpperCamelCase__ )
_UpperCAmelCase = torch.tensor(
[[-24.02_48, -10.30_24, -14.82_90], [-42.03_92, -16.82_00, -27.43_34], [-27.27_43, -11.81_54, -18.71_48]] , device=UpperCamelCase__ , )
_UpperCAmelCase = torch.tensor(
[[0.2_5_5_9, 0.5_4_5_5, 0.4_7_0_6], [0.2_9_8_9, 0.7_2_7_9, 0.1_8_7_5], [0.7_7_3_2, 0.4_0_1_7, 0.4_4_6_2]] , device=UpperCamelCase__ )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
self.assertTrue(torch.allclose(outputs.pred_boxes[0, :3, :3] , UpperCamelCase__ , atol=1e-4 ) )
# verify postprocessing
_UpperCAmelCase = image_processor.post_process_object_detection(
UpperCamelCase__ , threshold=0.3 , target_sizes=[image.size[::-1]] )[0]
_UpperCAmelCase = torch.tensor([0.9_9_9_4, 0.9_7_9_0, 0.9_9_6_4, 0.9_9_7_2, 0.9_8_6_1] ).to(UpperCamelCase__ )
_UpperCAmelCase = [75, 75, 17, 63, 17]
_UpperCAmelCase = torch.tensor([3_35.06_09, 79.38_48, 3_75.42_16, 1_87.24_95] ).to(UpperCamelCase__ )
self.assertEqual(len(results["""scores"""] ) , 5 )
self.assertTrue(torch.allclose(results["""scores"""] , UpperCamelCase__ , atol=1e-4 ) )
self.assertSequenceEqual(results["""labels"""].tolist() , UpperCamelCase__ )
self.assertTrue(torch.allclose(results["""boxes"""][0, :] , UpperCamelCase__ ) ) | 710 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
_UpperCAmelCase = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__UpperCamelCase )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 19 | 0 |
"""simple docstring"""
import warnings
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
from ...models import UNetaDModel
from ...schedulers import RePaintScheduler
from ...utils import PIL_INTERPOLATION, logging, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
a : str = logging.get_logger(__name__) # pylint: disable=invalid-name
def _UpperCamelCase ( _A ) -> int:
warnings.warn(
"""The preprocess method is deprecated and will be removed in a future version. Please"""
""" use VaeImageProcessor.preprocess instead""" , lowerCamelCase_ , )
if isinstance(lowerCamelCase_ , torch.Tensor ):
return image
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
_UpperCAmelCase = [image]
if isinstance(image[0] , PIL.Image.Image ):
_UpperCAmelCase ,_UpperCAmelCase = image[0].size
_UpperCAmelCase ,_UpperCAmelCase = (x - x % 8 for x in (w, h)) # resize to integer multiple of 8
_UpperCAmelCase = [np.array(i.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""] ) )[None, :] for i in image]
_UpperCAmelCase = np.concatenate(lowerCamelCase_ , axis=0 )
_UpperCAmelCase = np.array(lowerCamelCase_ ).astype(np.floataa ) / 2_5_5.0
_UpperCAmelCase = image.transpose(0 , 3 , 1 , 2 )
_UpperCAmelCase = 2.0 * image - 1.0
_UpperCAmelCase = torch.from_numpy(lowerCamelCase_ )
elif isinstance(image[0] , torch.Tensor ):
_UpperCAmelCase = torch.cat(lowerCamelCase_ , dim=0 )
return image
def _UpperCamelCase ( _A ) -> List[Any]:
if isinstance(lowerCamelCase_ , torch.Tensor ):
return mask
elif isinstance(lowerCamelCase_ , PIL.Image.Image ):
_UpperCAmelCase = [mask]
if isinstance(mask[0] , PIL.Image.Image ):
_UpperCAmelCase ,_UpperCAmelCase = mask[0].size
_UpperCAmelCase ,_UpperCAmelCase = (x - x % 3_2 for x in (w, h)) # resize to integer multiple of 32
_UpperCAmelCase = [np.array(m.convert("""L""" ).resize((w, h) , resample=PIL_INTERPOLATION["""nearest"""] ) )[None, :] for m in mask]
_UpperCAmelCase = np.concatenate(lowerCamelCase_ , axis=0 )
_UpperCAmelCase = mask.astype(np.floataa ) / 2_5_5.0
_UpperCAmelCase = 0
_UpperCAmelCase = 1
_UpperCAmelCase = torch.from_numpy(lowerCamelCase_ )
elif isinstance(mask[0] , torch.Tensor ):
_UpperCAmelCase = torch.cat(lowerCamelCase_ , dim=0 )
return mask
class a_ ( lowerCAmelCase__ ):
a : UNetaDModel
a : RePaintScheduler
def __init__( self : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ) ->List[Any]:
'''simple docstring'''
super().__init__()
self.register_modules(unet=_lowerCamelCase , scheduler=_lowerCamelCase )
@torch.no_grad()
def __call__( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple , __UpperCamelCase : str = 2_50 , __UpperCamelCase : Optional[Any] = 0.0 , __UpperCamelCase : int = 10 , __UpperCamelCase : Tuple = 10 , __UpperCamelCase : Optional[Any] = None , __UpperCamelCase : Optional[int] = "pil" , __UpperCamelCase : Union[str, Any] = True , ) ->Union[ImagePipelineOutput, Tuple]:
'''simple docstring'''
_UpperCAmelCase = image
_UpperCAmelCase = _preprocess_image(_lowerCamelCase )
_UpperCAmelCase = original_image.to(device=self.device , dtype=self.unet.dtype )
_UpperCAmelCase = _preprocess_mask(_lowerCamelCase )
_UpperCAmelCase = mask_image.to(device=self.device , dtype=self.unet.dtype )
_UpperCAmelCase = original_image.shape[0]
# sample gaussian noise to begin the loop
if isinstance(_lowerCamelCase , _lowerCamelCase ) and len(_lowerCamelCase ) != batch_size:
raise ValueError(
f"""You have passed a list of generators of length {len(_lowerCamelCase )}, but requested an effective batch"""
f""" size of {batch_size}. Make sure the batch size matches the length of the generators.""" )
_UpperCAmelCase = original_image.shape
_UpperCAmelCase = randn_tensor(_lowerCamelCase , generator=_lowerCamelCase , device=self.device , dtype=self.unet.dtype )
# set step values
self.scheduler.set_timesteps(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , self.device )
_UpperCAmelCase = eta
_UpperCAmelCase = self.scheduler.timesteps[0] + 1
_UpperCAmelCase = generator[0] if isinstance(_lowerCamelCase , _lowerCamelCase ) else generator
for i, t in enumerate(self.progress_bar(self.scheduler.timesteps ) ):
if t < t_last:
# predict the noise residual
_UpperCAmelCase = self.unet(_lowerCamelCase , _lowerCamelCase ).sample
# compute previous image: x_t -> x_t-1
_UpperCAmelCase = self.scheduler.step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase , _lowerCamelCase ).prev_sample
else:
# compute the reverse: x_t-1 -> x_t
_UpperCAmelCase = self.scheduler.undo_step(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
_UpperCAmelCase = t
_UpperCAmelCase = (image / 2 + 0.5).clamp(0 , 1 )
_UpperCAmelCase = image.cpu().permute(0 , 2 , 3 , 1 ).numpy()
if output_type == "pil":
_UpperCAmelCase = self.numpy_to_pil(_lowerCamelCase )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_lowerCamelCase ) | 711 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a : str = True
except (ImportError, ModuleNotFoundError):
a : List[str] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
re.sub("""<n>""" , """""" , _A ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_A ) ) | 19 | 0 |
"""simple docstring"""
import inspect
import unittest
import numpy as np
from transformers import BeitConfig
from transformers.testing_utils import require_flax, require_vision, slow
from transformers.utils import cached_property, is_flax_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, floats_tensor, ids_tensor
if is_flax_available():
import jax
from transformers import FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling, FlaxBeitModel
if is_vision_available():
from PIL import Image
from transformers import BeitImageProcessor
class a_ ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : str , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict=1_00 , __UpperCamelCase : Optional[Any]=13 , __UpperCamelCase : Dict=30 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Dict=3 , __UpperCamelCase : str=True , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Union[str, Any]=32 , __UpperCamelCase : List[Any]=5 , __UpperCamelCase : Optional[Any]=4 , __UpperCamelCase : Union[str, Any]=37 , __UpperCamelCase : Optional[int]="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : List[Any]=0.1 , __UpperCamelCase : str=10 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : List[Any]=3 , ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = vocab_size
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = type_sequence_label_size
_UpperCAmelCase = initializer_range
# in BeiT, the seq length equals the number of patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def _snake_case ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size] , self.type_sequence_label_size )
_UpperCAmelCase = BeitConfig(
vocab_size=self.vocab_size , image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , )
return config, pixel_values, labels
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = FlaxBeitModel(config=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = FlaxBeitForMaskedImageModeling(config=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length - 1, self.vocab_size) )
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : Any , __UpperCamelCase : Tuple ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.type_sequence_label_size
_UpperCAmelCase = FlaxBeitForImageClassification(config=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.type_sequence_label_size) )
# test greyscale images
_UpperCAmelCase = 1
_UpperCAmelCase = FlaxBeitForImageClassification(__UpperCamelCase )
_UpperCAmelCase = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
_UpperCAmelCase = model(__UpperCamelCase )
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
(
_UpperCAmelCase
) = config_and_inputs
_UpperCAmelCase = {"pixel_values": pixel_values}
return config, inputs_dict
@require_flax
class a_ ( _A , unittest.TestCase ):
'''simple docstring'''
a : Any = (
(FlaxBeitModel, FlaxBeitForImageClassification, FlaxBeitForMaskedImageModeling) if is_flax_available() else ()
)
def _snake_case ( self : str ) ->None:
'''simple docstring'''
_UpperCAmelCase = FlaxBeitModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Any ) ->Dict:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : int ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.__call__ )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["pixel_values"]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def model_jitted(__UpperCamelCase : List[str] , **__UpperCamelCase : Dict ):
return model(pixel_values=__UpperCamelCase , **__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = model_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = model_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*__UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_image_classification(*__UpperCamelCase )
@slow
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""microsoft/beit-base-patch16-224""" )
_UpperCAmelCase = model(np.ones((1, 3, 2_24, 2_24) ) )
self.assertIsNotNone(__UpperCamelCase )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_vision
@require_flax
class a_ ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _snake_case ( self : Dict ) ->Any:
'''simple docstring'''
return BeitImageProcessor.from_pretrained("""microsoft/beit-base-patch16-224""" ) if is_vision_available() else None
@slow
def _snake_case ( self : List[str] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxBeitForMaskedImageModeling.from_pretrained("""microsoft/beit-base-patch16-224-pt22k""" )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""np""" ).pixel_values
# prepare bool_masked_pos
_UpperCAmelCase = np.ones((1, 1_96) , dtype=__UpperCamelCase )
# forward pass
_UpperCAmelCase = model(pixel_values=__UpperCamelCase , bool_masked_pos=__UpperCamelCase )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = (1, 1_96, 81_92)
self.assertEqual(logits.shape , __UpperCamelCase )
_UpperCAmelCase = np.array(
[[-3.2_4_3_7, 0.5_0_7_2, -1_3.9_1_7_4], [-3.2_4_5_6, 0.4_9_4_8, -1_3.9_4_0_1], [-3.2_0_3_3, 0.5_1_2_1, -1_3.8_5_5_0]] )
self.assertTrue(np.allclose(logits[bool_masked_pos][:3, :3] , __UpperCamelCase , atol=1e-2 ) )
@slow
def _snake_case ( self : Optional[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-base-patch16-224""" )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""np""" )
# forward pass
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = (1, 10_00)
self.assertEqual(logits.shape , __UpperCamelCase )
_UpperCAmelCase = np.array([-1.2_3_8_5, -1.0_9_8_7, -1.0_1_0_8] )
self.assertTrue(np.allclose(logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
_UpperCAmelCase = 2_81
self.assertEqual(logits.argmax(-1 ).item() , __UpperCamelCase )
@slow
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = FlaxBeitForImageClassification.from_pretrained("""microsoft/beit-large-patch16-224-pt22k-ft22k""" )
_UpperCAmelCase = self.default_image_processor
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""np""" )
# forward pass
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.logits
# verify the logits
_UpperCAmelCase = (1, 2_18_41)
self.assertEqual(logits.shape , __UpperCamelCase )
_UpperCAmelCase = np.array([1.6_8_8_1, -0.2_7_8_7, 0.5_9_0_1] )
self.assertTrue(np.allclose(logits[0, :3] , __UpperCamelCase , atol=1e-4 ) )
_UpperCAmelCase = 23_96
self.assertEqual(logits.argmax(-1 ).item() , __UpperCamelCase ) | 712 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a_ :
a : List[Any] = PegasusConfig
a : Dict = {}
a : List[Any] = 'gelu'
def __init__( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any=False , __UpperCamelCase : Any=99 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[Any]=20 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Tuple=0 , ) ->int:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _snake_case ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _UpperCamelCase ( _A , _A , _A , _A=None , _A=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(_A , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a : Any = True
a : int = False
a : Union[str, Any] = False
a : Optional[int] = False
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : List[Any] , __UpperCamelCase : str=None , **__UpperCamelCase : int ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=5_12 , padding=__UpperCamelCase )
_UpperCAmelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded | 19 | 0 |
"""simple docstring"""
import pprint
import requests
a : List[Any] = '''https://zenquotes.io/api'''
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/today""" ).json()
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
return requests.get(API_ENDPOINT_URL + """/random""" ).json()
if __name__ == "__main__":
a : Optional[Any] = random_quotes()
pprint.pprint(response) | 713 |
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : List[str]=7 , __UpperCamelCase : List[Any]=9 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : int=False , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : List[Any]=8 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=0.0_0_2 , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = encoder_seq_length
_UpperCAmelCase = decoder_seq_length
# For common tests
_UpperCAmelCase = self.decoder_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = d_ff
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = None
_UpperCAmelCase = decoder_layers
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : str=None , ) ->int:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = input_ids.ne(config.pad_token_id )
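        # note: .ne(pad_token_id) yields a boolean mask that is True on every
        # non-pad position, the usual attention-mask convention in these tests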
if decoder_attention_mask is None:
_UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
_UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
        # we need to clamp the input ids here to avoid having pad tokens in between
        # this is because for NllbMoe the position_ids are prepared such that
        # all pad tokens have pos id = 2 and the rest are between 2..seq_length,
        # and the seq_length here is seq_length - num_pad_tokens
        # but when using past, there is no way of knowing whether the past input ids had
        # pad tokens in them, which results in an incorrect seq_length and, in turn,
        # in position_ids being off by num_pad_tokens in the past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
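        # a minimal illustration of the clamp above (assuming pad_token_id == 0):
        # torch.tensor([0, 5, 3]).clamp(1) -> tensor([1, 5, 3]), so no randomly
        # sampled id can collide with the pad token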
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_UpperCAmelCase ,_UpperCAmelCase = outputs.to_tuple()
        # create a hypothetical next token and extend next_input_ids with it
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
        # append the new tokens to input_ids
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
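        # shape sketch (not part of the test itself): concatenating a
        # (batch_size, seq_len) tensor with a (batch_size, 1) tensor along the
        # last dim yields (batch_size, seq_len + 1), mimicking one decoding step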
_UpperCAmelCase = model(__UpperCamelCase )["""last_hidden_state"""]
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_UpperCAmelCase = model(**__UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a : Any = True
a : Optional[int] = False
a : Any = False
a : Optional[int] = True
a : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a : int = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(attention_names , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_UpperCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_UpperCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
[3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(input_ids , EXPECTED_IDS )
_UpperCAmelCase = model.generate(input_ids.to(__UpperCamelCase ) )
_UpperCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertEqual(filling , EXPECTED_FILLING )
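# Hedged, self-contained restatement of the cache-equivalence check performed above:
# decoding one extra token with past_key_values must match the last position of a full
# forward pass. `model` is an assumption here: any HF-style decoder whose output
# exposes `last_hidden_state` and `past_key_values`.
def _past_kv_equivalence_sketch(model, input_ids, next_tokens, atol=1e-3):
    import torch

    full = model(torch.cat([input_ids, next_tokens], dim=-1))["last_hidden_state"]
    past = model(input_ids, use_cache=True)["past_key_values"]
    step = model(next_tokens, past_key_values=past)["last_hidden_state"]
    # the cached single step must reproduce the last position of the full pass
    return torch.allclose(full[:, -1], step[:, 0], atol=atol)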
"""simple docstring"""
def _UpperCamelCase ( _A = 1_0_0 ) -> str:
"""simple docstring"""
_UpperCAmelCase = (n * (n + 1) // 2) ** 2
_UpperCAmelCase = n * (n + 1) * (2 * n + 1) // 6
return sum_cubes - sum_squares
if __name__ == "__main__":
print(F"{solution() = }") | 714 |
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCamelCase ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
_UpperCAmelCase ,_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __UpperCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(_A ) if isinstance(_A , pa.Buffer ) else pa.memory_map(_A )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
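# Hedged minimal round trip using only names already imported in this file; it mirrors
# what the `_check_output` helper above verifies, outside of pytest parametrization:
def _arrow_roundtrip_sketch():
    sink = pa.BufferOutputStream()
    with ArrowWriter(stream=sink) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        writer.finalize()
    table = pa.ipc.open_stream(pa.BufferReader(sink.getvalue())).read_all()
    assert table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}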
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=_A , features=_A ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_A )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_UpperCAmelCase = os.path.join(_A , """test.arrow""" )
with ArrowWriter(path=_A , schema=pa.schema(_A ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(_A , 1 )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if pa.types.is_list(_A ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
if isinstance(lst[0] , list ):
change_first_primitive_element_in_list(lst[0] , value )
else:
lst[0] = value
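# Hedged, self-contained restatements of the two helpers above (their intended
# behavior, inferred from the call sites below): unwrap nested pyarrow list types,
# and mutate the first primitive of an arbitrarily nested Python list in place.
def _get_base_dtype_sketch(arr_type):
    while pa.types.is_list(arr_type):
        arr_type = arr_type.value_type
    return arr_type

def _change_first_primitive_sketch(lst, value):
    while isinstance(lst[0], list):
        lst = lst[0]
    lst[0] = value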
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
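# Hedged illustration of the fallback tested above: a column kept in a small dtype is
# promoted to int64 as soon as one value exceeds the small dtype's range.
def _optimized_int_sketch(values, small=np.int8):
    info = np.iinfo(small)
    fits = all(info.min <= v <= info.max for v in values)
    return np.asarray(values, dtype=small if fits else np.int64)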
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
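# Hedged variant of the Parquet round trip above, written against a real file path
# instead of an in-memory buffer (ParquetWriter accepting `path=` is assumed from its
# ArrowWriter parent, which the tests above use with `path=`):
def _parquet_file_roundtrip_sketch(tmp_path):
    path = str(tmp_path / "demo.parquet")
    with ParquetWriter(path=path) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.finalize()
    return pq.read_table(path).to_pydict()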
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_A , format="""png""" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_A , features=Features({"""image""": Image()} ) , embed_local_files=_A ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _A )
with open(_A , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_A )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=_A ) as writer:
writer._build_writer(inferred_schema=_A )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
"""simple docstring"""
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
_UpperCAmelCase = """mock-s3-bucket"""
_UpperCAmelCase = F"""s3://{mock_bucket}"""
_UpperCAmelCase = extract_path_from_uri(_UpperCAmelCase )
assert dataset_path.startswith("""s3://""" ) is False
_UpperCAmelCase = """./local/path"""
_UpperCAmelCase = extract_path_from_uri(_UpperCAmelCase )
assert dataset_path == new_dataset_path
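# Hedged examples for extract_path_from_uri (the bucket name is illustrative, not a
# fixture from this suite): remote URIs lose their protocol, local paths pass through.
def _extract_path_sketch():
    assert extract_path_from_uri("s3://my-bucket/data") == "my-bucket/data"
    assert extract_path_from_uri("./local/path") == "./local/path"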
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = is_remote_filesystem(_UpperCAmelCase )
assert is_remote is True
_UpperCAmelCase = fsspec.filesystem("""file""" )
_UpperCAmelCase = is_remote_filesystem(_UpperCAmelCase )
assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , _UpperCAmelCase )
def _UpperCamelCase ( _A , _A , _A , _A , _A , _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
_UpperCAmelCase = input_paths[compression_fs_class.protocol]
if input_path is None:
_UpperCAmelCase = F"""for \'{compression_fs_class.protocol}\' compression protocol, """
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_UpperCAmelCase )
_UpperCAmelCase = fsspec.filesystem(compression_fs_class.protocol , fo=_UpperCAmelCase )
assert isinstance(fs , compression_fs_class )
_UpperCAmelCase = os.path.basename(_UpperCAmelCase )
_UpperCAmelCase = expected_filename[: expected_filename.rindex(""".""" )]
assert fs.glob("""*""" ) == [expected_filename]
with fs.open(_UpperCAmelCase , """r""" , encoding="""utf-8""" ) as f, open(_UpperCAmelCase , encoding="""utf-8""" ) as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""] )
def _UpperCamelCase ( _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
_UpperCAmelCase = compressed_file_paths[protocol]
_UpperCAmelCase = """dataset.jsonl"""
_UpperCAmelCase = F"""{protocol}://{member_file_path}::{compressed_file_path}"""
_UpperCAmelCase ,*_UpperCAmelCase = fsspec.get_fs_token_paths(_UpperCAmelCase )
assert fs.isfile(_UpperCAmelCase )
assert not fs.isfile("""non_existing_""" + member_file_path )
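# Hedged sketch of the chained-URL convention exercised above: the segment before
# `::` names the member file, the segment after names the archive, and fsspec
# resolves the chain right to left.
def _chained_url_sketch(zip_jsonl_path):
    url = f"zip://dataset.jsonl::{zip_jsonl_path}"
    fs, *_ = fsspec.get_fs_token_paths(url)
    return fs.cat("dataset.jsonl")  # raw bytes of the member file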
@pytest.mark.integration
def _UpperCamelCase ( _A , _A , _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = hf_api.dataset_info(_UpperCAmelCase , token=_UpperCAmelCase )
_UpperCAmelCase = HfFileSystem(repo_info=_UpperCAmelCase , token=_UpperCAmelCase )
assert sorted(hffs.glob("""*""" ) ) == [".gitattributes", "data"]
assert hffs.isdir("""data""" )
assert hffs.isfile(""".gitattributes""" ) and hffs.isfile("""data/text_data.txt""" )
with open(_UpperCAmelCase ) as f:
assert hffs.open("""data/text_data.txt""" , """r""" ).read() == f.read()
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = """bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(protocol , None , clobber=True )
with pytest.warns(UserWarning ) as warning_info:
importlib.reload(datasets.filesystems )
assert len(warning_info ) == 1
assert (
str(warning_info[0].message )
== F"""A filesystem protocol was already set for {protocol} and will be overwritten."""
)
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : int ) ->Tuple:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(device , Device ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(device , str ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def _snake_case ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__UpperCamelCase ): device for device in jax.devices()}
def _snake_case ( self : Dict , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(column , list ) and column:
if all(
isinstance(__UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__UpperCamelCase , axis=0 )
return column
def _snake_case ( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(value , (str, bytes, type(None )) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
else:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(__UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__UpperCamelCase , """__array__""" ) and not isinstance(__UpperCamelCase , jax.Array ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantied from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : dict ) ->int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
_UpperCAmelCase = self._consolidate(__UpperCamelCase )
return column
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_batch(__UpperCamelCase )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
return batch
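# Hedged end-to-end usage of the formatter above via the public `with_format` API
# (assumes `datasets` and `jax` are installed; the toy column is illustrative):
def _jax_format_sketch():
    import datasets

    ds = datasets.Dataset.from_dict({"x": [[1, 2], [3, 4]]}).with_format("jax")
    row = ds[0]["x"]   # format_row -> jax.Array for one example
    col = ds["x"]      # format_column -> arrays stacked by _consolidate
    return row, col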
"""simple docstring"""
from __future__ import annotations
from collections.abc import Generator
import requests
from bsa import BeautifulSoup
a : List[str] = '''https://www.indeed.co.in/jobs?q=mobile+app+development&l='''
def _UpperCamelCase ( _A = "mumbai" ) -> Generator[tuple[str, str], None, None]:
"""simple docstring"""
_UpperCAmelCase = BeautifulSoup(requests.get(url + location ).content , """html.parser""" )
# This attribute finds out all the specifics listed in a job
for job in soup.find_all("""div""" , attrs={"""data-tn-component""": """organicJob"""} ):
_UpperCAmelCase = job.find("""a""" , attrs={"""data-tn-element""": """jobTitle"""} ).text.strip()
_UpperCAmelCase = job.find("""span""" , {"""class""": """company"""} ).text.strip()
yield job_title, company_name
if __name__ == "__main__":
for i, job in enumerate(fetch_jobs('''Bangalore'''), 1):
print(F"Job {i:>2} is {job[0]} at {job[1]}") | 716 |
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Tuple = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tokenizers_available, is_torch_available
a : str = {
'''configuration_biogpt''': ['''BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''BioGptConfig'''],
'''tokenization_biogpt''': ['''BioGptTokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Dict = [
'''BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''BioGptForCausalLM''',
'''BioGptForTokenClassification''',
'''BioGptForSequenceClassification''',
'''BioGptModel''',
'''BioGptPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_biogpt import BIOGPT_PRETRAINED_CONFIG_ARCHIVE_MAP, BioGptConfig
from .tokenization_biogpt import BioGptTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_biogpt import (
BIOGPT_PRETRAINED_MODEL_ARCHIVE_LIST,
BioGptForCausalLM,
BioGptForSequenceClassification,
BioGptForTokenClassification,
BioGptModel,
BioGptPreTrainedModel,
)
else:
import sys
a : Any = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
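# Hedged illustration of the lazy pattern used by the two __init__ modules above:
# importing the package is cheap, and heavy submodules only load on first attribute
# access (the module path assumes the transformers layout):
def _lazy_import_sketch():
    import importlib

    biogpt = importlib.import_module("transformers.models.biogpt")
    return biogpt.BioGptConfig  # resolved lazily via _LazyModule.__getattr__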
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a : int = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = test_results.split(""" """ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCAmelCase = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_A ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
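# Hedged, self-contained restatement of the stats parsing above; the summary string
# format is an assumption modeled on pytest's terminal summary line:
def _parse_stats_sketch(stats: str = "2 failed, 98 passed in 3.50s"):
    parts = stats.split(" ")
    failed = sum(int(parts[i - 1]) for i, p in enumerate(parts) if "failed" in p)
    passed = sum(int(parts[i - 1]) for i, p in enumerate(parts) if "passed" in p)
    time_spent = parts[-2] if "=" in parts[-1] else parts[-1]
    return failed, passed, time_spent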
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = None
_UpperCAmelCase = False
for line in failures_short_lines.split("""\n""" ):
if re.search(R"""_ \[doctest\]""" , _A ):
_UpperCAmelCase = True
_UpperCAmelCase = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_UpperCAmelCase = line
_UpperCAmelCase = False
return failures
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = title
_UpperCAmelCase = doc_test_results["""time_spent"""].split(""",""" )[0]
_UpperCAmelCase = doc_test_results["""success"""]
_UpperCAmelCase = doc_test_results["""failures"""]
_UpperCAmelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCAmelCase = doc_test_results
@property
def _snake_case ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self._time_spent]
_UpperCAmelCase = 0
for time in time_spent:
_UpperCAmelCase = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = [0, 0, time_parts[0]]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"""{int(__UpperCamelCase )}h{int(__UpperCamelCase )}m{int(__UpperCamelCase )}s"""
@property
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = 40
_UpperCAmelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(__UpperCamelCase , __UpperCamelCase )}
_UpperCAmelCase = """"""
for category, failures in category_failures.items():
if len(__UpperCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__UpperCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCamelCase )
@staticmethod
def _snake_case ( ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(__UpperCamelCase )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=__UpperCamelCase , )
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_UpperCAmelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
_UpperCAmelCase = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=__UpperCamelCase , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase = """"""
for key, value in failures.items():
_UpperCAmelCase = value[:2_00] + """ [Truncated]""" if len(__UpperCamelCase ) > 2_50 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
_UpperCAmelCase = job_name
_UpperCAmelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_UpperCAmelCase = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_UpperCAmelCase = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
_UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda __UpperCamelCase : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_UpperCAmelCase = f"""*Num failures* :{len(job_result["failed"] )} \n"""
_UpperCAmelCase = job_result["""failures"""]
_UpperCAmelCase = self.get_reply_blocks(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text=__UpperCamelCase )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f"""Results for {job}""" , blocks=__UpperCamelCase , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = os.environ["""GITHUB_RUN_ID"""]
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A ).json()
_UpperCAmelCase = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , _A )
return {}
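# Hedged check of the pagination arithmetic above: the GitHub jobs API returns at
# most 100 entries per page, so 250 jobs need 2 extra requests beyond the first page.
def _extra_pages_sketch(total_count: int = 250) -> int:
    return math.ceil((total_count - 100) / 100)  # == 2 for the default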
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {}
if os.path.exists(_A ):
_UpperCAmelCase = os.listdir(_A )
for file in files:
try:
with open(os.path.join(_A , _A ) , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(_A , _A )}.""" ) from e
return _artifact
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
class a_ :
def __init__( self : List[Any] , __UpperCamelCase : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = name
_UpperCAmelCase = []
def __str__( self : int ) ->Optional[Any]:
'''simple docstring'''
return self.name
def _snake_case ( self : Dict , __UpperCamelCase : str ) ->int:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
_UpperCAmelCase = {}
_UpperCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_UpperCAmelCase = directory
if artifact_name not in _available_artifacts:
_UpperCAmelCase = Artifact(_A )
_available_artifacts[artifact_name].add_path(_A )
return _available_artifacts
if __name__ == "__main__":
a : Dict = get_job_links()
a : Dict = retrieve_available_artifacts()
a : Optional[int] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a : Dict = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a : int = github_actions_job_links.get('''run_doctests''')
a : Tuple = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
a : Optional[Any] = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
a , a , a : str = handle_test_results(artifact['''stats'''])
a : Tuple = failed
a : int = success
a : Any = time_spent[1:-1] + ''', '''
a : Dict = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
a : List[Any] = line.replace('''FAILED ''', '''''')
a : Tuple = line.split()[0].replace('''\n''', '''''')
if "::" in line:
a , a : Union[str, Any] = line.split('''::''')
else:
a , a : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a : Optional[Any] = all_failures[test] if test in all_failures else '''N/A'''
a : List[str] = failure
break
a : List[Any] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply()
from dataclasses import dataclass
from enum import Enum
from typing import List, Optional, Union
import numpy as np
import PIL
from PIL import Image
from ...utils import BaseOutput, is_torch_available, is_transformers_available
@dataclass
class a_ ( __snake_case ):
a : int = 42
a : int = 42
if is_transformers_available() and is_torch_available():
from .pipeline_semantic_stable_diffusion import SemanticStableDiffusionPipeline
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase ( _A , _A=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
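# Hedged usage sketch of the flag parser above (the env value is set here purely for
# illustration; real runs set RUN_SLOW and friends outside the process):
def _flag_sketch(key: str = "RUN_SLOW") -> bool:
    os.environ[key] = "yes"
    return bool(strtobool(os.environ[key]))  # mirrors the "key is set" branch above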
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def _UpperCamelCase ( *_A ) -> Dict:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("""test""" ):
for decorator in decorators:
_UpperCAmelCase = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
class a_ ( _UpperCAmelCase ):
pass
class a_ ( _UpperCAmelCase ):
a : Any = 0
a : Optional[Any] = 1
a : int = 2
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(_A )
yield
finally:
os.chdir(_A )
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(_A )
if not quiet:
print(_A , _A , file=_A )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , _A , 0 , re.M )
return int(_A )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta | 19 | 0 |
"""simple docstring"""
import io
import json
import fsspec
import pytest
from datasets import Dataset, DatasetDict, Features, NamedSplit, Value
from datasets.io.json import JsonDatasetReader, JsonDatasetWriter
from ..utils import assert_arrow_memory_doesnt_increase, assert_arrow_memory_increases
def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _UpperCamelCase ( _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _UpperCamelCase ( _A , _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""},
] , )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_3""": """float64""", """col_1""": """string""", """col_2""": """int64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_3", "col_1", "col_2"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
def _UpperCamelCase ( _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = {"""col_2""": """int64""", """col_3""": """float64""", """col_1""": """string"""}
_UpperCAmelCase = features.copy()
_UpperCAmelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = JsonDatasetReader(lowercase_ , features=lowercase_ , cache_dir=lowercase_ ).read()
assert isinstance(lowercase_ , lowercase_ )
assert dataset.num_rows == 2
assert dataset.num_columns == 3
assert dataset.column_names == ["col_2", "col_3", "col_1"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _UpperCamelCase ( _A , _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ , split=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
    assert dataset.split == split if split else dataset.split == "train"
@pytest.mark.parametrize("""path_type""" , [str, list] )
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
if issubclass(lowercase_ , lowercase_ ):
_UpperCAmelCase = jsonl_path
elif issubclass(lowercase_ , lowercase_ ):
_UpperCAmelCase = [jsonl_path]
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_dataset(lowercase_ , lowercase_ )
def _UpperCamelCase ( _A , _A , _A=("train",) ) -> Optional[int]:
"""simple docstring"""
assert isinstance(lowercase_ , lowercase_ )
for split in splits:
_UpperCAmelCase = dataset_dict[split]
assert dataset.num_rows == 4
assert dataset.num_columns == 3
assert dataset.column_names == ["col_1", "col_2", "col_3"]
for feature, expected_dtype in expected_features.items():
assert dataset.features[feature].dtype == expected_dtype
@pytest.mark.parametrize("""keep_in_memory""" , [False, True] )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
with assert_arrow_memory_increases() if keep_in_memory else assert_arrow_memory_doesnt_increase():
_UpperCAmelCase = JsonDatasetReader({"""train""": jsonl_path} , cache_dir=lowercase_ , keep_in_memory=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize(
"""features""" , [
None,
{"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""},
{"""col_1""": """string""", """col_2""": """string""", """col_3""": """string"""},
{"""col_1""": """int32""", """col_2""": """int32""", """col_3""": """int32"""},
{"""col_1""": """float32""", """col_2""": """float32""", """col_3""": """float32"""},
] , )
def _UpperCamelCase ( _A , _A , _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = features.copy() if features else default_expected_features
_UpperCAmelCase = (
Features({feature: Value(lowercase_ ) for feature, dtype in features.items()} ) if features is not None else None
)
_UpperCAmelCase = JsonDatasetReader({"""train""": jsonl_path} , features=lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ )
@pytest.mark.parametrize("""split""" , [None, NamedSplit("""train""" ), """train""", """test"""] )
def _UpperCamelCase ( _A , _A , _A ) -> List[str]:
"""simple docstring"""
if split:
_UpperCAmelCase = {split: jsonl_path}
else:
_UpperCAmelCase = """train"""
_UpperCAmelCase = {"""train""": jsonl_path, """test""": jsonl_path}
_UpperCAmelCase = tmp_path / """cache"""
_UpperCAmelCase = {"""col_1""": """string""", """col_2""": """int64""", """col_3""": """float64"""}
_UpperCAmelCase = JsonDatasetReader(lowercase_ , cache_dir=lowercase_ ).read()
_check_json_datasetdict(lowercase_ , lowercase_ , splits=list(path.keys() ) )
assert all(dataset[split].split == split for split in path.keys() )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
return json.load(lowercase_ )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
return [json.loads(lowercase_ ) for line in buffer]
class a_ :
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def _snake_case ( self : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json_function(A__ )
assert isinstance(A__ , A__ )
assert isinstance(exported_content[0] , A__ )
assert len(A__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def _snake_case ( self : Any , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ , orient=A__ ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json(A__ )
assert isinstance(A__ , A__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(A__ ) == 10
@pytest.mark.parametrize("""lines, load_json_function""" , [(True, load_json_lines), (False, load_json)] )
def _snake_case ( self : Tuple , __UpperCamelCase : int , __UpperCamelCase : Dict , __UpperCamelCase : str ) ->Any:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json_function(A__ )
assert isinstance(A__ , A__ )
assert isinstance(exported_content[0] , A__ )
assert len(A__ ) == 10
@pytest.mark.parametrize(
"""orient, container, keys, len_at""" , [
("""records""", list, {"""tokens""", """labels""", """answers""", """id"""}, None),
("""split""", dict, {"""columns""", """data"""}, """data"""),
("""index""", dict, set("""0123456789""" ), None),
("""columns""", dict, {"""tokens""", """labels""", """answers""", """id"""}, """tokens"""),
("""values""", list, None, None),
("""table""", dict, {"""schema""", """data"""}, """data"""),
] , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , lines=A__ , orient=A__ , num_proc=2 ).write()
buffer.seek(0 )
_UpperCAmelCase = load_json(A__ )
assert isinstance(A__ , A__ )
if keys:
if container is dict:
assert exported_content.keys() == keys
else:
assert exported_content[0].keys() == keys
else:
assert not hasattr(A__ , """keys""" ) and not hasattr(exported_content[0] , """keys""" )
if len_at:
assert len(exported_content[len_at] ) == 10
else:
assert len(A__ ) == 10
def _snake_case ( self : Tuple , __UpperCamelCase : Tuple ) ->Dict:
'''simple docstring'''
with pytest.raises(A__ ):
with io.BytesIO() as buffer:
JsonDatasetWriter(A__ , A__ , num_proc=0 )
@pytest.mark.parametrize("""compression, extension""" , [("""gzip""", """gz"""), ("""bz2""", """bz2"""), ("""xz""", """xz""")] )
def _snake_case ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = tmp_path_factory.mktemp("""data""" ) / f"""test.json.{extension}"""
_UpperCAmelCase = str(shared_datadir / f"""test_file.json.{extension}""" )
JsonDatasetWriter(A__ , A__ , compression=A__ ).write()
with fsspec.open(A__ , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase = f.read()
with fsspec.open(A__ , """rb""" , compression="""infer""" ) as f:
_UpperCAmelCase = f.read()
assert exported_content == original_content | 719 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( _UpperCAmelCase ):
a : List[Any] = ''
a : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Tuple , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[str] = None , **__UpperCamelCase : Any , ) ->Any:
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def _snake_case ( self : List[str] ) ->List[str]:
'''simple docstring'''
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str = "rb" , **__UpperCamelCase : Any , ) ->List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
_UpperCAmelCase = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _snake_case ( self : int , __UpperCamelCase : int , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=False , **__UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip("""/""" ) )
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip("""/""" ) )
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 19 | 0 |
a : int = range(2, 2_0 + 1)
a : List[Any] = [1_0**k for k in range(ks[-1] + 1)]
a : Optional[int] = {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = sum(a_i[j] for j in range(lowerCAmelCase__ , len(lowerCAmelCase__ ) ) )
_UpperCAmelCase = sum(a_i[j] * base[j] for j in range(min(len(lowerCAmelCase__ ) , lowerCAmelCase__ ) ) )
_UpperCAmelCase ,_UpperCAmelCase = 0, 0
_UpperCAmelCase = n - i
_UpperCAmelCase = memo.get(lowerCAmelCase__ )
if sub_memo is not None:
_UpperCAmelCase = sub_memo.get(lowerCAmelCase__ )
if jumps is not None and len(lowerCAmelCase__ ) > 0:
# find and make the largest jump without going over
_UpperCAmelCase = -1
for _k in range(len(lowerCAmelCase__ ) - 1 , -1 , -1 ):
if jumps[_k][2] <= k and jumps[_k][1] <= max_dn:
_UpperCAmelCase = _k
break
if max_jump >= 0:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = jumps[max_jump]
# since the difference between jumps is cached, add c
_UpperCAmelCase = diff + c
for j in range(min(lowerCAmelCase__ , len(lowerCAmelCase__ ) ) ):
_UpperCAmelCase ,_UpperCAmelCase = divmod(lowerCAmelCase__ , 1_0 )
if new_c > 0:
add(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
else:
_UpperCAmelCase = []
else:
_UpperCAmelCase = {c: []}
_UpperCAmelCase = sub_memo
if dn >= max_dn or c + diff >= base[k]:
return diff, dn
if k > ks[0]:
while True:
# keep doing smaller jumps
_UpperCAmelCase ,_UpperCAmelCase = next_term(lowerCAmelCase__ , k - 1 , i + dn , lowerCAmelCase__ )
diff += _diff
dn += terms_jumped
if dn >= max_dn or c + diff >= base[k]:
break
else:
# would be too small a jump, just compute sequential terms instead
_UpperCAmelCase ,_UpperCAmelCase = compute(lowerCAmelCase__ , lowerCAmelCase__ , i + dn , lowerCAmelCase__ )
diff += _diff
dn += terms_jumped
_UpperCAmelCase = sub_memo[c]
# keep jumps sorted by # of terms skipped
_UpperCAmelCase = 0
while j < len(lowerCAmelCase__ ):
if jumps[j][1] > dn:
break
j += 1
# cache the jump for this value digitsum(b) and c
sub_memo[c].insert(lowerCAmelCase__ , (diff, dn, k) )
return (diff, dn)
def _UpperCamelCase ( _A , _A , _A , _A ) -> Tuple:
"""simple docstring"""
if i >= n:
return 0, i
if k > len(lowerCAmelCase__ ):
a_i.extend([0 for _ in range(k - len(lowerCAmelCase__ ) )] )
# note: a_i -> b * 10^k + c
# ds_b -> digitsum(b)
# ds_c -> digitsum(c)
_UpperCAmelCase = i
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = 0, 0, 0
for j in range(len(lowerCAmelCase__ ) ):
if j >= k:
ds_b += a_i[j]
else:
ds_c += a_i[j]
while i < n:
i += 1
_UpperCAmelCase = ds_c + ds_b
diff += addend
_UpperCAmelCase = 0
for j in range(lowerCAmelCase__ ):
_UpperCAmelCase = a_i[j] + addend
_UpperCAmelCase ,_UpperCAmelCase = divmod(lowerCAmelCase__ , 1_0 )
ds_c += a_i[j]
if addend > 0:
break
if addend > 0:
add(lowerCAmelCase__ , lowerCAmelCase__ , lowerCAmelCase__ )
return diff, i - start_i
def _UpperCamelCase ( _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
for j in range(lowerCAmelCase__ , len(lowerCAmelCase__ ) ):
_UpperCAmelCase = digits[j] + addend
if s >= 1_0:
_UpperCAmelCase ,_UpperCAmelCase = divmod(lowerCAmelCase__ , 1_0 )
_UpperCAmelCase = addend // 1_0 + quotient
else:
_UpperCAmelCase = s
_UpperCAmelCase = addend // 1_0
if addend == 0:
break
while addend > 0:
_UpperCAmelCase ,_UpperCAmelCase = divmod(lowerCAmelCase__ , 1_0 )
digits.append(lowerCAmelCase__ )
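# Cross-check sketch for the jump-table machinery above: the sequence is
# a(1) = 1, a(n + 1) = a(n) + digitsum(a(n)), so the solver below should agree
# with a naive loop for small n.
#
# >>> def naive_a(n):
# ...     x = 1
# ...     for _ in range(n - 1):
# ...         x += sum(int(d) for d in str(x))
# ...     return x
# >>> [naive_a(n) for n in range(1, 7)]
# [1, 2, 4, 8, 16, 23]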
def _UpperCamelCase ( _A = 1_0**1_5 ) -> int:
"""simple docstring"""
_UpperCAmelCase = [1]
_UpperCAmelCase = 1
_UpperCAmelCase = 0
while True:
_UpperCAmelCase ,_UpperCAmelCase = next_term(lowerCAmelCase__ , 2_0 , i + dn , lowerCAmelCase__ )
dn += terms_jumped
if dn == n - i:
break
_UpperCAmelCase = 0
for j in range(len(lowerCAmelCase__ ) ):
a_n += digits[j] * 1_0**j
return a_n
if __name__ == "__main__":
print(F"{solution() = }") | 720 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Any = '''
Calculates how good are predictions given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the canidate programs (Default: 4).
timeout:
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can to this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : List[Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=[1, 10, 1_00] , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple=3.0 ) ->Union[str, Any]:
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase ,_UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
def estimator(_A , _A , _A ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
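    # Worked example for the estimator above: with n = 5 samples of which c = 2
    # pass, pass@1 = 1 - C(n - c, 1) / C(n, 1) = 1 - 3 / 5 = 0.4. The product form
    # yields the same value without computing large binomial coefficients:
    # 1 - np.prod(1.0 - 1 / np.arange(4, 6)) == 1 - (3 / 4) * (4 / 5) == 0.4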
if isinstance(_A , _A ):
_UpperCAmelCase = itertools.repeat(_A , len(_A ) )
else:
assert len(_A ) == len(_A )
_UpperCAmelCase = iter(_A )
return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] ) | 19 | 0 |
import logging
import re
import pytorch_quantization
import pytorch_quantization.nn as quant_nn
import torch
from pytorch_quantization import calib
from pytorch_quantization.tensor_quant import QuantDescriptor
a : Union[str, Any] = logging.getLogger(__name__)
a : str = 5_0 # max width of layer names
a : int = 7_0 # max width of quantizer names
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = parser.add_argument_group("""quant_trainer arguments""" )
group.add_argument("""--wprec""" , type=_lowerCamelCase , default=8 , help="""weight precision""" )
group.add_argument("""--aprec""" , type=_lowerCamelCase , default=8 , help="""activation precision""" )
group.add_argument("""--quant-per-tensor""" , action="""store_true""" , help="""per tensor weight scaling""" )
group.add_argument("""--quant-disable""" , action="""store_true""" , help="""disable all quantizers""" )
group.add_argument("""--quant-disable-embeddings""" , action="""store_true""" , help="""disable all embeddings quantizers""" )
group.add_argument("""--quant-disable-keyword""" , type=_lowerCamelCase , nargs="""+""" , help="""disable quantizers by keyword""" )
group.add_argument("""--quant-disable-layer-module""" , type=_lowerCamelCase , help="""disable quantizers by keyword under layer.""" )
group.add_argument("""--quant-enable-layer-module""" , type=_lowerCamelCase , help="""enable quantizers by keyword under layer""" )
group.add_argument("""--calibrator""" , default="""max""" , help="""which quantization range calibrator to use""" )
group.add_argument("""--percentile""" , default=_lowerCamelCase , type=_lowerCamelCase , help="""percentile for PercentileCalibrator""" )
group.add_argument("""--fuse-qkv""" , action="""store_true""" , help="""use the same scale factor for qkv""" )
group.add_argument("""--clip-gelu""" , metavar="""N""" , type=_lowerCamelCase , help="""clip gelu output maximum value to N""" )
group.add_argument(
"""--recalibrate-weights""" , action="""store_true""" , help=(
"""recalibrate weight amaxes by taking the max of the weights."""
""" amaxes will be computed with the current quantization granularity (axis)."""
) , )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if args.calibrator == "max":
_UpperCAmelCase = """max"""
elif args.calibrator == "percentile":
if args.percentile is None:
raise ValueError("""Specify --percentile when using percentile calibrator""" )
_UpperCAmelCase = """histogram"""
elif args.calibrator == "mse":
_UpperCAmelCase = """histogram"""
else:
raise ValueError(F"""Invalid calibrator {args.calibrator}""" )
_UpperCAmelCase = QuantDescriptor(num_bits=args.aprec , calib_method=_lowerCamelCase )
_UpperCAmelCase = QuantDescriptor(num_bits=args.wprec , axis=(None if args.quant_per_tensor else (0,)) )
quant_nn.QuantLinear.set_default_quant_desc_input(_lowerCamelCase )
quant_nn.QuantLinear.set_default_quant_desc_weight(_lowerCamelCase )
def _UpperCamelCase ( _A , _A , _A=False , _A=False ) -> Any:
"""simple docstring"""
logger.info("""Configuring Model for Quantization""" )
logger.info(F"""using quantization package {pytorch_quantization.__file__}""" )
if not calib:
if args.quant_disable_embeddings:
set_quantizer_by_name(_lowerCamelCase , ["""embeddings"""] , which="""weight""" , _disabled=_lowerCamelCase )
if args.quant_disable:
set_quantizer_by_name(_lowerCamelCase , [""""""] , _disabled=_lowerCamelCase )
if args.quant_disable_keyword:
set_quantizer_by_name(_lowerCamelCase , args.quant_disable_keyword , _disabled=_lowerCamelCase )
if args.quant_disable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [R"""layer.\d+.""" + args.quant_disable_layer_module] , _disabled=_lowerCamelCase )
if args.quant_enable_layer_module:
set_quantizer_by_name(_lowerCamelCase , [R"""layer.\d+.""" + args.quant_enable_layer_module] , _disabled=_lowerCamelCase )
if args.recalibrate_weights:
recalibrate_weights(_lowerCamelCase )
if args.fuse_qkv:
fuse_qkv(_lowerCamelCase , _lowerCamelCase )
if args.clip_gelu:
clip_gelu(_lowerCamelCase , args.clip_gelu )
# if args.local_rank in [-1, 0] and not calib:
print_quant_summary(_lowerCamelCase )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
logger.info("""Enabling Calibration""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
module.disable_quant()
module.enable_calib()
else:
module.disable()
logger.info(F"""{name:80}: {module}""" )
def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
logger.info("""Loading calibrated amax""" )
for name, module in model.named_modules():
if name.endswith("""_quantizer""" ):
if module._calibrator is not None:
if isinstance(module._calibrator , calib.MaxCalibrator ):
module.load_calib_amax()
else:
module.load_calib_amax("""percentile""" , percentile=args.percentile )
module.enable_quant()
module.disable_calib()
else:
module.enable()
model.cuda()
print_quant_summary(_lowerCamelCase )
def _UpperCamelCase ( _A , _A ) -> Tuple:
"""simple docstring"""
def fusea(_A , _A , _A ):
for mod in [qq, qk, qv]:
if not hasattr(_lowerCamelCase , """_amax""" ):
print(""" WARNING: NO AMAX BUFFER""" )
return
_UpperCAmelCase = qq._amax.detach().item()
_UpperCAmelCase = qk._amax.detach().item()
_UpperCAmelCase = qv._amax.detach().item()
_UpperCAmelCase = max(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
qq._amax.fill_(_lowerCamelCase )
qk._amax.fill_(_lowerCamelCase )
qv._amax.fill_(_lowerCamelCase )
logger.info(F""" q={q:5.2f} k={k:5.2f} v={v:5.2f} -> {amax:5.2f}""" )
for name, mod in model.named_modules():
if name.endswith(""".attention.self""" ):
logger.info(F"""FUSE_QKV: {name:{name_width}}""" )
fusea(mod.matmul_q_input_quantizer , mod.matmul_k_input_quantizer , mod.matmul_v_input_quantizer )
if args.quant_per_tensor:
fusea(mod.query._weight_quantizer , mod.key._weight_quantizer , mod.value._weight_quantizer )
def _UpperCamelCase ( _A , _A ) -> Tuple:
"""simple docstring"""
for name, mod in model.named_modules():
if name.endswith(""".output.dense""" ) and not name.endswith("""attention.output.dense""" ):
_UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
mod._input_quantizer._amax.data.detach().clamp_(max=_lowerCamelCase )
_UpperCAmelCase = mod._input_quantizer._amax.data.detach().item()
logger.info(F"""CLIP_GELU: {name:{name_width}} amax: {amax_init:5.2f} -> {amax:5.2f}""" )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , """_weight_quantizer""" ) and mod._weight_quantizer.axis is not None:
_UpperCAmelCase = mod.weight.shape[0]
_UpperCAmelCase = mod._weight_quantizer._amax.detach()
_UpperCAmelCase = torch.ones(_lowerCamelCase , dtype=amax.dtype , device=amax.device ) * amax
print(F"""expanding {name} {amax} -> {mod._weight_quantizer._amax}""" )
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , """_weight_quantizer""" ):
if not hasattr(mod.weight_quantizer , """_amax""" ):
print("""RECALIB: {name:{name_width}} WARNING: NO AMAX BUFFER""" )
continue
# determine which axes to reduce across
# e.g. a 4D tensor quantized per axis 0 should reduce over (1,2,3)
_UpperCAmelCase = set() if mod._weight_quantizer.axis is None else set(mod._weight_quantizer.axis )
_UpperCAmelCase = set(range(len(mod.weight.size() ) ) ) - axis_set
_UpperCAmelCase = pytorch_quantization.utils.reduce_amax(mod.weight , axis=_lowerCamelCase , keepdims=_lowerCamelCase ).detach()
logger.info(F"""RECALIB: {name:{name_width}} {mod._weight_quantizer._amax.flatten()} -> {amax.flatten()}""" )
_UpperCAmelCase = amax
def _UpperCamelCase ( _A , _A=2_5 , _A=1_8_0 , _A=None ) -> Dict:
"""simple docstring"""
if ignore is None:
_UpperCAmelCase = []
elif not isinstance(_lowerCamelCase , _lowerCamelCase ):
_UpperCAmelCase = [ignore]
_UpperCAmelCase = 0
for name, mod in model.named_modules():
if not hasattr(_lowerCamelCase , """weight""" ):
continue
_UpperCAmelCase = max(_lowerCamelCase , len(_lowerCamelCase ) )
for name, mod in model.named_modules():
_UpperCAmelCase = getattr(_lowerCamelCase , """_input_quantizer""" , _lowerCamelCase )
_UpperCAmelCase = getattr(_lowerCamelCase , """_weight_quantizer""" , _lowerCamelCase )
if not hasattr(_lowerCamelCase , """weight""" ):
continue
if type(_lowerCamelCase ) in ignore:
continue
if [True for s in ignore if type(_lowerCamelCase ) is str and s in name]:
continue
_UpperCAmelCase = F"""Act:{input_q.extra_repr()}"""
_UpperCAmelCase = F"""Wgt:{weight_q.extra_repr()}"""
_UpperCAmelCase = F"""{name:{name_width}} {act_str} {wgt_str}"""
if len(_lowerCamelCase ) <= line_width:
logger.info(_lowerCamelCase )
else:
logger.info(F"""{name:{name_width}} {act_str}""" )
logger.info(F"""{" ":{name_width}} {wgt_str}""" )
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = 0
for name, mod in model.named_modules():
if isinstance(_lowerCamelCase , pytorch_quantization.nn.TensorQuantizer ):
print(F"""{name:80} {mod}""" )
count += 1
print(F"""{count} TensorQuantizers found in model""" )
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = getattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
if quantizer_mod is not None:
assert hasattr(_lowerCamelCase , _lowerCamelCase )
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
else:
logger.warning(F"""{name} has no {quantizer}""" )
def _UpperCamelCase ( _A , _A , _A="both" , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = F"""Warning: changing {which} quantizers of {name:{qname_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
if which in ["input", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , """_input_quantizer""" , _lowerCamelCase , _lowerCamelCase )
if which in ["weight", "both"]:
set_quantizer(_lowerCamelCase , _lowerCamelCase , """_weight_quantizer""" , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase )
def _UpperCamelCase ( _A , _A , **_A ) -> Optional[int]:
"""simple docstring"""
for name, mod in model.named_modules():
if hasattr(_lowerCamelCase , """_input_quantizer""" ) or hasattr(_lowerCamelCase , """_weight_quantizer""" ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
set_quantizers(_lowerCamelCase , _lowerCamelCase , **_lowerCamelCase )
elif name.endswith("""_quantizer""" ):
for n in names:
if re.search(_lowerCamelCase , _lowerCamelCase ):
_UpperCAmelCase = F"""Warning: changing {name:{name_width}}"""
for k, v in kwargs.items():
s += F""" {k}={v}"""
setattr(_lowerCamelCase , _lowerCamelCase , _lowerCamelCase )
logger.info(_lowerCamelCase ) | 721 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
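# Usage sketch (assumes the upstream signature euler_modified(ode_func, y0, x0,
# step_size, x_end); the parameter names above are obfuscated to `_A`): integrate
# y' = y on [0, 1], whose exact solution is e**x, and check the endpoint.
#
# >>> import numpy as np
# >>> y = euler_modified(lambda x, y: y, 1.0, 0.0, 0.1, 1.0)
# >>> abs(y[-1] - np.e) < 1e-2
# True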
if __name__ == "__main__":
import doctest
doctest.testmod() | 19 | 0 |
"""simple docstring"""
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
a : str = logging.get_logger(__name__)
enable_full_determinism()
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
a : Union[str, Any] = UNetaDModel
a : Optional[Any] = """sample"""
@property
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
return (3, 32, 32)
@property
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
return (3, 32, 32)
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""block_out_channels""": (32, 64),
"""down_block_types""": ("""DownBlock2D""", """AttnDownBlock2D"""),
"""up_block_types""": ("""AttnUpBlock2D""", """UpBlock2D"""),
"""attention_head_dim""": 3,
"""out_channels""": 3,
"""in_channels""": 3,
"""layers_per_block""": 2,
"""sample_size""": 32,
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
a : int = UNetaDModel
a : List[str] = """sample"""
@property
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = 4
_UpperCAmelCase = 4
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCAmelCase = torch.tensor([10] ).to(__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self : List[Any] ) ->int:
'''simple docstring'''
return (4, 32, 32)
@property
def _snake_case ( self : Optional[Any] ) ->List[Any]:
'''simple docstring'''
return (4, 32, 32)
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""sample_size""": 32,
"""in_channels""": 4,
"""out_channels""": 4,
"""layers_per_block""": 2,
"""block_out_channels""": (32, 64),
"""attention_head_dim""": 32,
"""down_block_types""": ("""DownBlock2D""", """DownBlock2D"""),
"""up_block_types""": ("""UpBlock2D""", """UpBlock2D"""),
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def _snake_case ( self : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__UpperCamelCase )
model.to(__UpperCamelCase )
_UpperCAmelCase = model(**self.dummy_input ).sample
assert image is not None, "Make sure output is not None"
@unittest.skipIf(torch_device != """cuda""" , """This test is supposed to run on GPU""" )
def _snake_case ( self : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" , output_loading_info=__UpperCamelCase )
model_accelerate.to(__UpperCamelCase )
model_accelerate.eval()
_UpperCAmelCase = torch.randn(
1 , model_accelerate.config.in_channels , model_accelerate.config.sample_size , model_accelerate.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__UpperCamelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
_UpperCAmelCase = model_accelerate(__UpperCamelCase , __UpperCamelCase )["""sample"""]
# two models don't need to stay in the device at the same time
del model_accelerate
torch.cuda.empty_cache()
gc.collect()
_UpperCAmelCase ,_UpperCAmelCase = UNetaDModel.from_pretrained(
"""fusing/unet-ldm-dummy-update""" , output_loading_info=__UpperCamelCase , low_cpu_mem_usage=__UpperCamelCase )
model_normal_load.to(__UpperCamelCase )
model_normal_load.eval()
_UpperCAmelCase = model_normal_load(__UpperCamelCase , __UpperCamelCase )["""sample"""]
assert torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 )
def _snake_case ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/unet-ldm-dummy-update""" )
model.eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = torch.randn(
1 , model.config.in_channels , model.config.sample_size , model.config.sample_size , generator=torch.manual_seed(0 ) , )
_UpperCAmelCase = noise.to(__UpperCamelCase )
_UpperCAmelCase = torch.tensor([10] * noise.shape[0] ).to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ).sample
_UpperCAmelCase = output[0, -1, -3:, -3:].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-1_3.3_2_5_8, -2_0.1_1_0_0, -1_5.9_8_7_3, -1_7.6_6_1_7, -2_3.0_5_9_6, -1_7.9_4_1_9, -1_3.3_6_7_5, -1_6.1_8_8_9, -1_2.3_8_0_0] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-3 ) )
class a_ ( snake_case_ , snake_case_ , unittest.TestCase ):
a : Dict = UNetaDModel
a : Tuple = """sample"""
@property
def _snake_case ( self : Tuple , __UpperCamelCase : List[str]=(32, 32) ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = floats_tensor((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCAmelCase = torch.tensor(batch_size * [10] ).to(dtype=torch.intaa , device=__UpperCamelCase )
return {"sample": noise, "timestep": time_step}
@property
def _snake_case ( self : Optional[Any] ) ->Tuple:
'''simple docstring'''
return (3, 32, 32)
@property
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
return (3, 32, 32)
def _snake_case ( self : List[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = {
"""block_out_channels""": [32, 64, 64, 64],
"""in_channels""": 3,
"""layers_per_block""": 1,
"""out_channels""": 3,
"""time_embedding_type""": """fourier""",
"""norm_eps""": 1e-6,
"""mid_block_scale_factor""": math.sqrt(2.0 ),
"""norm_num_groups""": None,
"""down_block_types""": [
"""SkipDownBlock2D""",
"""AttnSkipDownBlock2D""",
"""SkipDownBlock2D""",
"""SkipDownBlock2D""",
],
"""up_block_types""": [
"""SkipUpBlock2D""",
"""SkipUpBlock2D""",
"""AttnSkipUpBlock2D""",
"""SkipUpBlock2D""",
],
}
_UpperCAmelCase = self.dummy_input
return init_dict, inputs_dict
@slow
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" , output_loading_info=__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
self.assertEqual(len(loading_info["""missing_keys"""] ) , 0 )
model.to(__UpperCamelCase )
_UpperCAmelCase = self.dummy_input
_UpperCAmelCase = floats_tensor((4, 3) + (2_56, 2_56) ).to(__UpperCamelCase )
_UpperCAmelCase = noise
_UpperCAmelCase = model(**__UpperCamelCase )
assert image is not None, "Make sure output is not None"
@slow
def _snake_case ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UNetaDModel.from_pretrained("""google/ncsnpp-celebahq-256""" )
model.to(__UpperCamelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (2_56, 2_56)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-4_8_4_2.8_6_9_1, -6_4_9_9.6_6_3_1, -3_8_0_0.1_9_5_3, -7_9_7_8.2_6_8_6, -1_09_80.71_29, -2_00_28.85_35, 8_1_4_8.2_8_2_2, 2_3_4_2.2_9_0_5, 5_6_7.7_6_0_8] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-2 ) )
def _snake_case ( self : str ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = UNetaDModel.from_pretrained("""fusing/ncsnpp-ffhq-ve-dummy-update""" )
model.to(__UpperCamelCase )
_UpperCAmelCase = 4
_UpperCAmelCase = 3
_UpperCAmelCase = (32, 32)
_UpperCAmelCase = torch.ones((batch_size, num_channels) + sizes ).to(__UpperCamelCase )
_UpperCAmelCase = torch.tensor(batch_size * [1e-4] ).to(__UpperCamelCase )
with torch.no_grad():
_UpperCAmelCase = model(__UpperCamelCase , __UpperCamelCase ).sample
_UpperCAmelCase = output[0, -3:, -3:, -1].flatten().cpu()
# fmt: off
_UpperCAmelCase = torch.tensor([-0.0_3_2_5, -0.0_9_0_0, -0.0_8_6_9, -0.0_3_3_2, -0.0_7_2_5, -0.0_2_7_0, -0.0_1_0_1, 0.0_2_2_7, 0.0_2_5_6] )
# fmt: on
self.assertTrue(torch_all_close(__UpperCamelCase , __UpperCamelCase , rtol=1e-2 ) )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
pass | 700 |
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
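# Usage sketch (assumes the upstream name `load_model_tokenizer` for the helper above):
# >>> model, tokenizer = load_model_tokenizer("facebook/bart-base", "cpu")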
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
main() | 19 | 0 |
"""simple docstring"""
from typing import List, Optional, Union
import torch
from transformers import (
XLMRobertaTokenizer,
)
from ...models import UNetaDConditionModel, VQModel
from ...pipelines import DiffusionPipeline
from ...pipelines.pipeline_utils import ImagePipelineOutput
from ...schedulers import DDIMScheduler, DDPMScheduler
from ...utils import (
is_accelerate_available,
is_accelerate_version,
logging,
randn_tensor,
replace_example_docstring,
)
from .text_encoder import MultilingualCLIP
a : Union[str, Any] = logging.get_logger(__name__) # pylint: disable=invalid-name
a : Union[str, Any] = '''\n Examples:\n ```py\n >>> from diffusers import KandinskyPipeline, KandinskyPriorPipeline\n >>> import torch\n\n >>> pipe_prior = KandinskyPriorPipeline.from_pretrained("kandinsky-community/Kandinsky-2-1-prior")\n >>> pipe_prior.to("cuda")\n\n >>> prompt = "red cat, 4k photo"\n >>> out = pipe_prior(prompt)\n >>> image_emb = out.image_embeds\n >>> negative_image_emb = out.negative_image_embeds\n\n >>> pipe = KandinskyPipeline.from_pretrained("kandinsky-community/kandinsky-2-1")\n >>> pipe.to("cuda")\n\n >>> image = pipe(\n ... prompt,\n ... image_embeds=image_emb,\n ... negative_image_embeds=negative_image_emb,\n ... height=768,\n ... width=768,\n ... num_inference_steps=100,\n ... ).images\n\n >>> image[0].save("cat.png")\n ```\n'''
def _UpperCamelCase ( _A , _A , _A=8 ) -> str:
"""simple docstring"""
_UpperCAmelCase = h // scale_factor**2
if h % scale_factor**2 != 0:
new_h += 1
_UpperCAmelCase = w // scale_factor**2
if w % scale_factor**2 != 0:
new_w += 1
return new_h * scale_factor, new_w * scale_factor
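# Worked example for the helper above: with h = w = 768 and scale_factor = 8,
# 768 // 8**2 = 12 with no remainder, so it returns (12 * 8, 12 * 8) = (96, 96);
# any remainder would round the latent grid up to the next multiple of the factor.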
class a_ ( _UpperCAmelCase ):
def __init__( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Any , __UpperCamelCase : Any , ) ->int:
'''simple docstring'''
super().__init__()
self.register_modules(
text_encoder=lowercase__ , tokenizer=lowercase__ , unet=lowercase__ , scheduler=lowercase__ , movq=lowercase__ , )
_UpperCAmelCase = 2 ** (len(self.movq.config.block_out_channels ) - 1)
def _snake_case ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[int] ) ->Dict:
'''simple docstring'''
if latents is None:
_UpperCAmelCase = randn_tensor(lowercase__ , generator=lowercase__ , device=lowercase__ , dtype=lowercase__ )
else:
if latents.shape != shape:
raise ValueError(f"""Unexpected latents shape, got {latents.shape}, expected {shape}""" )
_UpperCAmelCase = latents.to(lowercase__ )
_UpperCAmelCase = latents * scheduler.init_noise_sigma
return latents
    def _encode_prompt(self, prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt=None):
        batch_size = len(prompt) if isinstance(prompt, list) else 1
        # get prompt text embeddings
        text_inputs = self.tokenizer(
            prompt,
            padding="max_length",
            truncation=True,
            max_length=77,
            return_attention_mask=True,
            add_special_tokens=True,
            return_tensors="pt",
        )
        text_input_ids = text_inputs.input_ids
        untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
        if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
            removed_text = self.tokenizer.batch_decode(untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1])
            logger.warning(
                "The following part of your input was truncated because CLIP can only handle sequences up to"
                f" {self.tokenizer.model_max_length} tokens: {removed_text}"
            )
        text_input_ids = text_input_ids.to(device)
        text_mask = text_inputs.attention_mask.to(device)
        prompt_embeds, text_encoder_hidden_states = self.text_encoder(
            input_ids=text_input_ids, attention_mask=text_mask
        )
        prompt_embeds = prompt_embeds.repeat_interleave(num_images_per_prompt, dim=0)
        text_encoder_hidden_states = text_encoder_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
        text_mask = text_mask.repeat_interleave(num_images_per_prompt, dim=0)
        if do_classifier_free_guidance:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=77,
                truncation=True,
                return_attention_mask=True,
                add_special_tokens=True,
                return_tensors="pt",
            )
            uncond_text_input_ids = uncond_input.input_ids.to(device)
            uncond_text_mask = uncond_input.attention_mask.to(device)
            negative_prompt_embeds, uncond_text_encoder_hidden_states = self.text_encoder(
                input_ids=uncond_text_input_ids, attention_mask=uncond_text_mask
            )
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]
            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len)
            seq_len = uncond_text_encoder_hidden_states.shape[1]
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.repeat(1, num_images_per_prompt, 1)
            uncond_text_encoder_hidden_states = uncond_text_encoder_hidden_states.view(
                batch_size * num_images_per_prompt, seq_len, -1
            )
            uncond_text_mask = uncond_text_mask.repeat_interleave(num_images_per_prompt, dim=0)
            # done duplicates

            # For classifier free guidance, we need to do two forward passes.
            # Here we concatenate the unconditional and text embeddings into a single batch
            # to avoid doing two forward passes; layout is [unconditional, conditional].
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
            text_encoder_hidden_states = torch.cat([uncond_text_encoder_hidden_states, text_encoder_hidden_states])
            text_mask = torch.cat([uncond_text_mask, text_mask])
        return prompt_embeds, text_encoder_hidden_states, text_mask
    def enable_sequential_cpu_offload(self, gpu_id=0):
        """Offload all models to CPU via accelerate, loading each onto the GPU only when used."""
        if is_accelerate_available():
            from accelerate import cpu_offload
        else:
            raise ImportError("Please install accelerate via `pip install accelerate`")
        device = torch.device(f"cuda:{gpu_id}")
        models = [
            self.unet,
            self.text_encoder,
            self.movq,
        ]
        for cpu_offloaded_model in models:
            if cpu_offloaded_model is not None:
                cpu_offload(cpu_offloaded_model, device)
    def enable_model_cpu_offload(self, gpu_id=0):
        """Offload whole models (not submodules) to CPU, moving each to GPU when its forward runs."""
        if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
            from accelerate import cpu_offload_with_hook
        else:
            raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
        device = torch.device(f"cuda:{gpu_id}")
        if self.device.type != "cpu":
            self.to("cpu", silence_dtype_warnings=True)
            torch.cuda.empty_cache()  # otherwise we don't see the memory savings (but they probably exist)
        hook = None
        for cpu_offloaded_model in [self.text_encoder, self.unet, self.movq]:
            _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
        if self.safety_checker is not None:
            _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
        # We'll offload the last model manually.
        self.final_offload_hook = hook
    @property
    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._execution_device
    def _execution_device(self):
        if not hasattr(self.unet, "_hf_hook"):
            return self.device
        for module in self.unet.modules():
            if (
                hasattr(module, "_hf_hook")
                and hasattr(module._hf_hook, "execution_device")
                and module._hf_hook.execution_device is not None
            ):
                return torch.device(module._hf_hook.execution_device)
        return self.device
    @torch.no_grad()
    @replace_example_docstring(a)
    def __call__(
        self,
        prompt,
        image_embeds,
        negative_image_embeds,
        negative_prompt=None,
        height=512,
        width=512,
        num_inference_steps=100,
        guidance_scale=4.0,
        num_images_per_prompt=1,
        generator=None,
        latents=None,
        output_type="pil",
        return_dict=True,
    ):
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        device = self._execution_device
        batch_size = batch_size * num_images_per_prompt
        do_classifier_free_guidance = guidance_scale > 1.0

        prompt_embeds, text_encoder_hidden_states, _ = self._encode_prompt(
            prompt, device, num_images_per_prompt, do_classifier_free_guidance, negative_prompt
        )

        if isinstance(image_embeds, list):
            image_embeds = torch.cat(image_embeds, dim=0)
        if isinstance(negative_image_embeds, list):
            negative_image_embeds = torch.cat(negative_image_embeds, dim=0)

        if do_classifier_free_guidance:
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            negative_image_embeds = negative_image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
            image_embeds = torch.cat([negative_image_embeds, image_embeds], dim=0).to(
                dtype=prompt_embeds.dtype, device=device
            )

        self.scheduler.set_timesteps(num_inference_steps, device=device)
        timesteps_tensor = self.scheduler.timesteps

        num_channels_latents = self.unet.config.in_channels
        height, width = get_new_h_w(height, width, self.movq_scale_factor)

        # create initial latent
        latents = self.prepare_latents(
            (batch_size, num_channels_latents, height, width),
            text_encoder_hidden_states.dtype,
            device,
            generator,
            latents,
            self.scheduler,
        )
        for i, t in enumerate(self.progress_bar(timesteps_tensor)):
            # expand the latents if we are doing classifier free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            added_cond_kwargs = {"text_embeds": prompt_embeds, "image_embeds": image_embeds}
            noise_pred = self.unet(
                sample=latent_model_input,
                timestep=t,
                encoder_hidden_states=text_encoder_hidden_states,
                added_cond_kwargs=added_cond_kwargs,
                return_dict=False,
            )[0]

            if do_classifier_free_guidance:
                noise_pred, variance_pred = noise_pred.split(latents.shape[1], dim=1)
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                _, variance_pred_text = variance_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
                noise_pred = torch.cat([noise_pred, variance_pred_text], dim=1)

            if not (
                hasattr(self.scheduler.config, "variance_type")
                and self.scheduler.config.variance_type in ["learned", "learned_range"]
            ):
                noise_pred, _ = noise_pred.split(latents.shape[1], dim=1)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(
                noise_pred,
                t,
                latents,
                generator=generator,
            ).prev_sample

        # post-processing
        image = self.movq.decode(latents, force_not_quantize=True)["sample"]

        if output_type not in ["pt", "np", "pil"]:
            raise ValueError(f"Only the output types `pt`, `pil` and `np` are supported not output_type={output_type}")

        if output_type in ["np", "pil"]:
            image = image * 0.5 + 0.5
            image = image.clamp(0, 1)
            image = image.cpu().permute(0, 2, 3, 1).float().numpy()

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def get_job_links(workflow_run_id, token=None):
    """Extract job names and their job links in a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"
    result = requests.get(url, headers=headers).json()
    job_links = {}
    try:
        job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            job_links.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return job_links
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def get_artifacts_links(workflow_run_id, token=None):
    """Get all artifact links from a GitHub Actions workflow run."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/artifacts?per_page=100"
    result = requests.get(url, headers=headers).json()
    artifacts = {}
    try:
        artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}", headers=headers).json()
            artifacts.update({artifact["name"]: artifact["archive_download_url"] for artifact in result["artifacts"]})
        return artifacts
    except Exception:
        print(f"Unknown error, could not fetch links:\n{traceback.format_exc()}")
    return {}
def download_artifact(artifact_name, artifact_url, output_dir, token):
    """Download an artifact zip, following the redirect returned by the GitHub API."""
    headers = None
    if token is not None:
        headers = {"Accept": "application/vnd.github+json", "Authorization": f"Bearer {token}"}
    result = requests.get(artifact_url, headers=headers, allow_redirects=False)
    download_url = result.headers["Location"]
    response = requests.get(download_url, allow_redirects=True)
    file_path = os.path.join(output_dir, f"{artifact_name}.zip")
    with open(file_path, "wb") as fp:
        fp.write(response.content)
def get_errors_from_single_artifact(artifact_zip_path, job_links=None):
    """Extract errors from a downloaded artifact (in .zip format)."""
    errors = []
    failed_tests = []
    job_name = None
    with zipfile.ZipFile(artifact_zip_path) as z:
        for filename in z.namelist():
            if not os.path.isdir(filename):
                # read the file
                if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
                    with z.open(filename) as f:
                        for line in f:
                            line = line.decode("UTF-8").strip()
                            if filename == "failures_line.txt":
                                try:
                                    # `error_line` is the place where `error` occurs
                                    error_line = line[: line.index(": ")]
                                    error = line[line.index(": ") + len(": ") :]
                                    errors.append([error_line, error])
                                except Exception:
                                    # skip un-related lines
                                    pass
                            elif filename == "summary_short.txt" and line.startswith("FAILED "):
                                # `test` is the test method that failed
                                test = line[len("FAILED ") :]
                                failed_tests.append(test)
                            elif filename == "job_name.txt":
                                job_name = line
    if len(errors) != len(failed_tests):
        raise ValueError(
            f"`errors` and `failed_tests` should have the same number of elements. Got {len(errors)} for `errors` "
            f"and {len(failed_tests)} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"
            " problem."
        )
    job_link = None
    if job_name and job_links:
        job_link = job_links.get(job_name, None)
    # A list with elements of the form (line of error, error, failed test)
    result = [x + [y] + [job_link] for x, y in zip(errors, failed_tests)]
    return result
def get_all_errors(artifact_dir, job_links=None):
    """Extract errors from all artifact files found in `artifact_dir`."""
    errors = []
    paths = [os.path.join(artifact_dir, p) for p in os.listdir(artifact_dir) if p.endswith(".zip")]
    for p in paths:
        errors.extend(get_errors_from_single_artifact(p, job_links=job_links))
    return errors
def reduce_by_error(logs, error_filter=None):
    """Count the occurrences of each error."""
    counter = Counter()
    counter.update([x[1] for x in logs])
    counts = counter.most_common()
    r = {}
    for error, count in counts:
        if error_filter is None or error not in error_filter:
            r[error] = {"count": count, "failed_tests": [(x[2], x[0]) for x in logs if x[1] == error]}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def get_model(test):
    """Get the model name from a test method path."""
    test = test.split("::")[0]
    if test.startswith("tests/models/"):
        model = test.split("/")[2]
    else:
        model = None
    return model
def reduce_by_model(logs, error_filter=None):
    """Count the occurrences of each error per model."""
    logs = [(x[0], x[1], get_model(x[2])) for x in logs]
    logs = [x for x in logs if x[2] is not None]
    tests = {x[2] for x in logs}
    r = {}
    for test in tests:
        counter = Counter()
        # count by errors in `test`
        counter.update([x[1] for x in logs if x[2] == test])
        counts = counter.most_common()
        error_counts = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
        n_errors = sum(error_counts.values())
        if n_errors > 0:
            r[test] = {"count": n_errors, "errors": error_counts}
    r = dict(sorted(r.items(), key=lambda item: item[1]["count"], reverse=True))
    return r
def make_github_table(reduced_by_error):
    """Render the error counts as a GitHub-flavored Markdown table."""
    header = "| no. | error | status |"
    sep = "|-:|:-|:-|"
    lines = [header, sep]
    for error in reduced_by_error:
        count = reduced_by_error[error]["count"]
        line = f"| {count} | {error[:100]} | |"
        lines.append(line)
    return "\n".join(lines)
def make_github_table_per_model(reduced_by_model):
    """Render the per-model error counts as a GitHub-flavored Markdown table."""
    header = "| model | no. of errors | major error | count |"
    sep = "|-:|-:|-:|-:|"
    lines = [header, sep]
    for model in reduced_by_model:
        count = reduced_by_model[model]["count"]
        error, _count = list(reduced_by_model[model]["errors"].items())[0]
        line = f"| {model} | {count} | {error[:60]} | {_count} |"
        lines.append(line)
    return "\n".join(lines)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument("--workflow_run_id", type=str, required=True, help="A GitHub Actions workflow run id.")
    parser.add_argument(
        "--output_dir",
        type=str,
        required=True,
        help="Where to store the downloaded artifacts and other result files.",
    )
    parser.add_argument("--token", default=None, type=str, help="A token that has actions:read permission.")
    args = parser.parse_args()

    os.makedirs(args.output_dir, exist_ok=True)

    _job_links = get_job_links(args.workflow_run_id, token=args.token)
    job_links = {}
    # To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
    # For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
    if _job_links:
        for k, v in _job_links.items():
            # This is how GitHub actions combine job names.
            if " / " in k:
                index = k.find(" / ")
                k = k[index + len(" / ") :]
            job_links[k] = v
    with open(os.path.join(args.output_dir, "job_links.json"), "w", encoding="UTF-8") as fp:
        json.dump(job_links, fp, ensure_ascii=False, indent=4)

    artifacts = get_artifacts_links(args.workflow_run_id, token=args.token)
    with open(os.path.join(args.output_dir, "artifacts.json"), "w", encoding="UTF-8") as fp:
        json.dump(artifacts, fp, ensure_ascii=False, indent=4)

    for idx, (name, url) in enumerate(artifacts.items()):
        download_artifact(name, url, args.output_dir, args.token)
        # Be gentle to GitHub
        time.sleep(1)

    errors = get_all_errors(args.output_dir, job_links=job_links)
    # `e[1]` is the error
    counter = Counter()
    counter.update([e[1] for e in errors])
    # print the top 30 most common test errors
    most_common = counter.most_common(30)
    for item in most_common:
        print(item)
    with open(os.path.join(args.output_dir, "errors.json"), "w", encoding="UTF-8") as fp:
        json.dump(errors, fp, ensure_ascii=False, indent=4)

    reduced_by_error = reduce_by_error(errors)
    reduced_by_model = reduce_by_model(errors)
    s1 = make_github_table(reduced_by_error)
    s2 = make_github_table_per_model(reduced_by_model)
    with open(os.path.join(args.output_dir, "reduced_by_error.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s1)
    with open(os.path.join(args.output_dir, "reduced_by_model.txt"), "w", encoding="UTF-8") as fp:
        fp.write(s2)
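# Example invocation (hypothetical file name and run id):
#   python get_ci_error_statistics.py --workflow_run_id 123456789 --output_dir ci_reports --token $GITHUB_TOKEN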
"""simple docstring"""
from typing import List, Optional
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)

AUTOFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''huggingface/autoformer-tourism-monthly''': '''https://huggingface.co/huggingface/autoformer-tourism-monthly/resolve/main/config.json''',
}
class a_ ( PretrainedConfig ):
    model_type = "autoformer"
    attribute_map = {
        "hidden_size": "d_model",
        "num_attention_heads": "encoder_attention_heads",
        "num_hidden_layers": "encoder_layers",
    }
    def __init__(
        self,
        prediction_length: Optional[int] = None,
        context_length: Optional[int] = None,
        distribution_output: str = "student_t",
        loss: str = "nll",
        input_size: int = 1,
        lags_sequence: List[int] = [1, 2, 3, 4, 5, 6, 7],
        scaling: bool = True,
        num_time_features: int = 0,
        num_dynamic_real_features: int = 0,
        num_static_categorical_features: int = 0,
        num_static_real_features: int = 0,
        cardinality: Optional[List[int]] = None,
        embedding_dimension: Optional[List[int]] = None,
        d_model: int = 64,
        encoder_attention_heads: int = 2,
        decoder_attention_heads: int = 2,
        encoder_layers: int = 2,
        decoder_layers: int = 2,
        encoder_ffn_dim: int = 32,
        decoder_ffn_dim: int = 32,
        activation_function: str = "gelu",
        dropout: float = 0.1,
        encoder_layerdrop: float = 0.1,
        decoder_layerdrop: float = 0.1,
        attention_dropout: float = 0.1,
        activation_dropout: float = 0.1,
        num_parallel_samples: int = 100,
        init_std: float = 0.02,
        use_cache: bool = True,
        is_encoder_decoder=True,
        # Autoformer arguments
        label_length: int = 10,
        moving_average: int = 25,
        autocorrelation_factor: int = 3,
        **kwargs,
    ):
        # time series specific configuration
        self.prediction_length = prediction_length
        self.context_length = context_length if context_length is not None else prediction_length
        self.distribution_output = distribution_output
        self.loss = loss
        self.input_size = input_size
        self.num_time_features = num_time_features
        self.lags_sequence = lags_sequence
        self.scaling = scaling
        self.num_dynamic_real_features = num_dynamic_real_features
        self.num_static_real_features = num_static_real_features
        self.num_static_categorical_features = num_static_categorical_features
        if cardinality is not None and num_static_categorical_features > 0:
            if len(cardinality) != num_static_categorical_features:
                raise ValueError(
                    "The cardinality should be a list of the same length as `num_static_categorical_features`"
                )
            self.cardinality = cardinality
        else:
            self.cardinality = [0]
        if embedding_dimension is not None and num_static_categorical_features > 0:
            if len(embedding_dimension) != num_static_categorical_features:
                raise ValueError(
                    "The embedding dimension should be a list of the same length as `num_static_categorical_features`"
                )
            self.embedding_dimension = embedding_dimension
        else:
            self.embedding_dimension = [min(50, (cat + 1) // 2) for cat in self.cardinality]
        self.num_parallel_samples = num_parallel_samples

        # Transformer architecture configuration
        self.feature_size = input_size * len(self.lags_sequence) + self._number_of_features
        self.d_model = d_model
        self.encoder_attention_heads = encoder_attention_heads
        self.decoder_attention_heads = decoder_attention_heads
        self.encoder_ffn_dim = encoder_ffn_dim
        self.decoder_ffn_dim = decoder_ffn_dim
        self.encoder_layers = encoder_layers
        self.decoder_layers = decoder_layers
        self.dropout = dropout
        self.attention_dropout = attention_dropout
        self.activation_dropout = activation_dropout
        self.encoder_layerdrop = encoder_layerdrop
        self.decoder_layerdrop = decoder_layerdrop
        self.activation_function = activation_function
        self.init_std = init_std
        self.use_cache = use_cache

        # Autoformer
        self.label_length = label_length
        self.moving_average = moving_average
        self.autocorrelation_factor = autocorrelation_factor
        super().__init__(is_encoder_decoder=is_encoder_decoder, **kwargs)
    @property
    def _number_of_features(self) -> int:
        return (
            sum(self.embedding_dimension)
            + self.num_dynamic_real_features
            + self.num_time_features
            + self.num_static_real_features
            + self.input_size * 2  # the log1p(abs(loc)) and log(scale) features
        )
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( ProcessorMixin ):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"
    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")
        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")
        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def __call__(self, *args, **kwargs):
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)
        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]
        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")
        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)
        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs
    def batch_decode(self, *args, **kwargs):
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        return self.tokenizer.decode(*args, **kwargs)
    @contextmanager
    def as_target_processor(self):
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False
    def tokenajson(self, tokens, is_inner_value=False, added_vocab=None):
'''simple docstring'''
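        # Converts a flat sequence of <s_key>...</s_key> tokens into nested JSON.
        # Illustrative example (hypothetical tokens):
        #   "<s_menu><s_name>Latte</s_name></s_menu>"  ->  {"menu": {"name": "Latte"}}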
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()
        output = {}
        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.tokenajson(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]
                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.tokenajson(tokens[6:], is_inner_value=True, added_vocab=added_vocab)
        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}
    @property
    def feature_extractor_class(self):
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
"""simple docstring"""
import csv
from collections import defaultdict
from dataclasses import dataclass, field
from typing import List, Optional
import matplotlib.pyplot as plt
import numpy as np
from matplotlib.ticker import ScalarFormatter
from transformers import HfArgumentParser
def list_field(default=None, metadata=None):
    """Declare a dataclass list field with a per-instance mutable default."""
    return field(default_factory=lambda: default, metadata=metadata)
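# Usage sketch (field name is just an example): inside a dataclass one can write
#   short_model_names: Optional[List[str]] = list_field(default=None, metadata={"help": "..."})
# so that every instance gets its own copy of the default instead of a shared mutable object.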
@dataclass
class PlotArguments:
    csv_file: str = field(
        metadata={"help": "The csv file to plot."},
    )
    plot_along_batch: bool = field(
        default=False,
        metadata={"help": "Whether to plot along batch size or sequence length. Defaults to sequence length."},
    )
    is_time: bool = field(
        default=False,
        metadata={"help": "Whether the csv file has time results or memory results. Defaults to memory results."},
    )
    no_log_scale: bool = field(
        default=False,
        metadata={"help": "Disable logarithmic scale when plotting"},
    )
    is_train: bool = field(
        default=False,
        metadata={
            "help": "Whether the csv file has training results or inference results. Defaults to inference results."
        },
    )
    figure_png_file: Optional[str] = field(
        default=None,
        metadata={"help": "Filename under which the plot will be saved. If unused no plot is saved."},
    )
    short_model_names: Optional[List[str]] = list_field(
        default=None, metadata={"help": "List of model names that are used instead of the ones in the csv file."}
    )
def can_convert_to_int(string):
    try:
        int(string)
        return True
    except ValueError:
        return False


def can_convert_to_float(string):
    try:
        float(string)
        return True
    except ValueError:
        return False
class Plot:
    def __init__(self, args):
        self.args = args
        self.result_dict = defaultdict(lambda: {"bsz": [], "seq_len": [], "result": {}})
        with open(self.args.csv_file, newline="") as csv_file:
            reader = csv.DictReader(csv_file)
            for row in reader:
                model_name = row["model"]
                self.result_dict[model_name]["bsz"].append(int(row["batch_size"]))
                self.result_dict[model_name]["seq_len"].append(int(row["sequence_length"]))
                if can_convert_to_int(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = int(row["result"])
                elif can_convert_to_float(row["result"]):
                    # value is not None
                    self.result_dict[model_name]["result"][
                        (int(row["batch_size"]), int(row["sequence_length"]))
                    ] = float(row["result"])
    def plot(self):
        fig, ax = plt.subplots()
        title_str = "Time usage" if self.args.is_time else "Memory usage"
        title_str = title_str + " for training" if self.args.is_train else title_str + " for inference"
if not self.args.no_log_scale:
# set logarithm scales
ax.set_xscale("""log""" )
ax.set_yscale("""log""" )
for axis in [ax.xaxis, ax.yaxis]:
axis.set_major_formatter(ScalarFormatter() )
for model_name_idx, model_name in enumerate(self.result_dict.keys() ):
            batch_sizes = sorted(set(self.result_dict[model_name]["bsz"]))
            sequence_lengths = sorted(set(self.result_dict[model_name]["seq_len"]))
            results = self.result_dict[model_name]["result"]
            (x_axis_array, inner_loop_array) = (
                (batch_sizes, sequence_lengths) if self.args.plot_along_batch else (sequence_lengths, batch_sizes)
            )
            label_model_name = (
                model_name if self.args.short_model_names is None else self.args.short_model_names[model_name_idx]
            )
for inner_loop_value in inner_loop_array:
                if self.args.plot_along_batch:
                    y_axis_array = np.asarray(
                        [results[(x, inner_loop_value)] for x in x_axis_array if (x, inner_loop_value) in results],
                        dtype=int,
                    )
                else:
                    y_axis_array = np.asarray(
                        [results[(inner_loop_value, x)] for x in x_axis_array if (inner_loop_value, x) in results],
                        dtype=np.float32,
                    )
                (x_axis_label, inner_loop_label) = (
                    ("batch_size", "len") if self.args.plot_along_batch else ("in #tokens", "bsz")
                )
                x_axis_array = np.asarray(x_axis_array, int)[: len(y_axis_array)]
                plt.scatter(
                    x_axis_array, y_axis_array, label=f"{label_model_name} - {inner_loop_label}: {inner_loop_value}"
                )
                plt.plot(x_axis_array, y_axis_array, "--")
title_str += f""" {label_model_name} vs."""
        title_str = title_str[:-4]
        y_axis_label = "Time in s" if self.args.is_time else "Memory in MB"
        # plot
        plt.title(title_str)
        plt.xlabel(x_axis_label)
        plt.ylabel(y_axis_label)
plt.legend()
if self.args.figure_png_file is not None:
plt.savefig(self.args.figure_png_file )
else:
plt.show()
def main():
    parser = HfArgumentParser(PlotArguments)
    plot_args = parser.parse_args_into_dataclasses()[0]
    plot = Plot(args=plot_args)
    plot.plot()


if __name__ == "__main__":
    main()
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x, y, max_step):
    """
    Return the relative distance (= step / max_step) after which the complex number made of
    this x-y-pair diverges. Members of the Mandelbrot set do not diverge, so their distance is 1.
    """
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new
        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)
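# Worked example: for (x, y) = (0, 0) the iteration never diverges, so the loop runs to
# completion, `step` ends at max_step - 1 and the function returns 1.0; points far from the
# set escape after the first step and yield values close to 0.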
def get_black_and_white_rgb(distance: float) -> tuple:
    """Black & white coding: points of the Mandelbrot set are black, everything else is white."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)
def get_color_coded_rgb(distance: float) -> tuple:
    """Hue-based coding that scales with the relative distance of a point."""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))
def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """Create an image of the Mandelbrot set."""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()
    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height
            distance = get_distance(figure_x, figure_y, max_step)
            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)
    return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
    img.show()
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)


class a_ ( PretrainedConfig ):
    model_type = "timm_backbone"

    def __init__(
        self,
        backbone=None,
        num_channels=3,
        features_only=True,
        use_pretrained_backbone=True,
        out_indices=None,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.backbone = backbone
        self.num_channels = num_channels
        self.features_only = features_only
        self.use_pretrained_backbone = use_pretrained_backbone
        self.use_timm_backbone = True
        self.out_indices = out_indices if out_indices is not None else (-1,)
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
'''simple docstring'''
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        input_states = hidden_states
        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                input_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]
        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states
        if not return_dict:
            return (output_states,)
        return TransformeraDModelOutput(sample=output_states)
"""simple docstring"""
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
PATTERN = re.compile(r"\s+")


def get_hash(example):
    """Get the hash of a code example's content, ignoring all whitespace."""
    return {"hash": hashlib.md5(re.sub(PATTERN, "", example["content"]).encode("utf-8")).hexdigest()}
def line_stats(example):
    """Compute per-line statistics of a code example."""
    line_lengths = [len(line) for line in example["content"].splitlines()]
    return {"line_mean": np.mean(line_lengths), "line_max": max(line_lengths)}


def alpha_stats(example):
    """Compute the fraction of alphanumeric characters in a code example."""
    alpha_frac = np.mean([c.isalnum() for c in example["content"]])
    return {"alpha_frac": alpha_frac}
def check_uniques(example, uniques):
    """Check if the current hash is still in the set of unique hashes, and remove it if so."""
    if example["hash"] in uniques:
        uniques.remove(example["hash"])
        return True
    else:
        return False
def is_autogenerated(example, scan_width=5):
    """Check if the first lines of the file mention that it was auto-generated."""
    keywords = ["auto-generated", "autogenerated", "automatically generated"]
    lines = example["content"].splitlines()
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"autogenerated": True}
    else:
        return {"autogenerated": False}
def is_config_or_test(example, scan_width=5, coeff=0.05):
    """Check if the file is a configuration file or a unit test."""
    keywords = ["unit tests", "test file", "configuration file"]
    lines = example["content"].splitlines()
    count_config = 0
    count_test = 0
    # first test
    for _, line in zip(range(scan_width), lines):
        for keyword in keywords:
            if keyword in line.lower():
                return {"config_or_test": True}
    # second test
    nlines = example["content"].count("\n")
    threshold = int(coeff * nlines)
    for line in lines:
        count_config += line.lower().count("config")
        count_test += line.lower().count("test")
        if count_config > threshold or count_test > threshold:
            return {"config_or_test": True}
    return {"config_or_test": False}
def has_no_keywords(example):
    """Check if the file contains none of the keywords for: function, class, for loop, while loop."""
    keywords = ["def ", "class ", "for ", "while "]
    lines = example["content"].splitlines()
    for line in lines:
        for keyword in keywords:
            if keyword in line.lower():
                return {"has_no_keywords": False}
    return {"has_no_keywords": True}
def has_few_assignments(example, minimum=4):
    """Check if the file uses the symbol '=' fewer than `minimum` times."""
    lines = example["content"].splitlines()
    counter = 0
    for line in lines:
        counter += line.lower().count("=")
        if counter > minimum:
            return {"has_few_assignments": False}
    return {"has_few_assignments": True}
def char_token_ratio(example):
    """Compute the character/token ratio of the file with the tokenizer."""
    input_ids = tokenizer(example["content"], truncation=False)["input_ids"]
    ratio = len(example["content"]) / len(input_ids)
    return {"ratio": ratio}
def preprocess(example):
    """Chain all preprocessing steps into one function to avoid filling the cache."""
    results = {}
    results.update(get_hash(example))
    results.update(line_stats(example))
    results.update(alpha_stats(example))
    results.update(char_token_ratio(example))
    results.update(is_autogenerated(example))
    results.update(is_config_or_test(example))
    results.update(has_no_keywords(example))
    results.update(has_few_assignments(example))
    return results
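# After `preprocess`, each example carries the flags "hash", "line_mean", "line_max",
# "alpha_frac", "ratio", "autogenerated", "config_or_test", "has_no_keywords" and
# "has_few_assignments", which `filter` below combines into a single keep/drop decision.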
def filter(example, uniques, args):
    """Filter the dataset with heuristics."""
    if not check_uniques(example, uniques):
        return False
    elif example["autogenerated"]:
        return False
    elif example["line_max"] > args.line_max:
        return False
    elif example["line_mean"] > args.line_mean:
        return False
    elif example["alpha_frac"] < args.alpha_frac:
        return False
    elif example["ratio"] < args.min_token_ratio:
        return False
    elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
        return False
    elif example["has_few_assignments"]:
        return False
    else:
        return True
def compress_file(file_path):
    """Compress a file with g-zip and remove the original."""
    with open(file_path, "rb") as f_in:
        with gzip.open(str(file_path) + ".gz", "wb", compresslevel=6) as f_out:
            shutil.copyfileobj(f_in, f_out)
    os.unlink(file_path)
# Settings
parser = HfArgumentParser(PreprocessingArguments)
args = parser.parse_args()
if args.num_workers is None:
    args.num_workers = multiprocessing.cpu_count()
tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_dir)

# Load dataset
t_start = time.time()
ds = load_dataset(args.dataset_name, split="train")
print(f"Time to load dataset: {time.time()-t_start:.2f}")

# Run preprocessing
t_start = time.time()
ds = ds.map(preprocess, num_proc=args.num_workers)
print(f"Time to preprocess dataset: {time.time()-t_start:.2f}")

# Deduplicate hashes
uniques = set(ds.unique("hash"))
frac = len(uniques) / len(ds)
print(f"Fraction of duplicates: {1-frac:.2%}")

# Deduplicate data and apply heuristics
t_start = time.time()
ds_filter = ds.filter(filter, fn_kwargs={"uniques": uniques, "args": args})
print(f"Time to filter dataset: {time.time()-t_start:.2f}")
print(f"Size of filtered dataset: {len(ds_filter)}")

# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
    t_start = time.time()
    ds_filter, duplicate_clusters = deduplicate_dataset(ds_filter, args.jaccard_threshold)
    print(f"Time to deduplicate dataset: {time.time()-t_start:.2f}")
    print(f"Size of deduplicate dataset: {len(ds_filter)}")

# Save data in batches of samples_per_file
output_dir = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)

# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
    with open(output_dir / "duplicate_clusters.json", "w") as f:
        json.dump(duplicate_clusters, f)

data_dir = output_dir / "data"
data_dir.mkdir(exist_ok=True)

t_start = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
    file_path = str(data_dir / f"file-{file_number+1:012}.json")
    end_index = min(len(ds_filter), index + args.samples_per_file)
    ds_filter.select(list(range(index, end_index))).to_json(file_path)
    compress_file(file_path)
print(f"Time to save dataset: {time.time()-t_start:.2f}")
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """Convert a TensorFlow LXMERT checkpoint into a PyTorch model file."""
    # Initialise PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)
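# Example invocation (hypothetical paths):
#   python convert_lxmert_original_tf_checkpoint_to_pytorch.py \
#       --tf_checkpoint_path ./lxmert/model.ckpt \
#       --config_file ./lxmert/config.json \
#       --pytorch_dump_path ./lxmert_pytorch_model.bin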
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
from collections import defaultdict
from typing import Optional
from ..image_utils import load_image
from ..utils import (
add_end_docstrings,
is_torch_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, ChunkPipeline
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_MASK_GENERATION_MAPPING
logger = logging.get_logger(__name__)


@add_end_docstrings(PIPELINE_INIT_ARGS)
class a_ ( ChunkPipeline ):
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        requires_backends(self, "vision")
        requires_backends(self, "torch")
        if self.framework != "pt":
            raise ValueError(f"The {self.__class__} is only available in PyTorch.")
        self.check_model_type(MODEL_FOR_MASK_GENERATION_MAPPING)
    def _sanitize_parameters(self, **kwargs):
        preprocess_kwargs = {}
        postprocess_kwargs = {}
        forward_params = {}
        # preprocess args
        if "points_per_batch" in kwargs:
            preprocess_kwargs["points_per_batch"] = kwargs["points_per_batch"]
        if "points_per_crop" in kwargs:
            preprocess_kwargs["points_per_crop"] = kwargs["points_per_crop"]
        if "crops_n_layers" in kwargs:
            preprocess_kwargs["crops_n_layers"] = kwargs["crops_n_layers"]
        if "crop_overlap_ratio" in kwargs:
            preprocess_kwargs["crop_overlap_ratio"] = kwargs["crop_overlap_ratio"]
        if "crop_n_points_downscale_factor" in kwargs:
            preprocess_kwargs["crop_n_points_downscale_factor"] = kwargs["crop_n_points_downscale_factor"]
        # forward args
        if "pred_iou_thresh" in kwargs:
            forward_params["pred_iou_thresh"] = kwargs["pred_iou_thresh"]
        if "stability_score_offset" in kwargs:
            forward_params["stability_score_offset"] = kwargs["stability_score_offset"]
        if "mask_threshold" in kwargs:
            forward_params["mask_threshold"] = kwargs["mask_threshold"]
        if "stability_score_thresh" in kwargs:
            forward_params["stability_score_thresh"] = kwargs["stability_score_thresh"]
        # postprocess args
        if "crops_nms_thresh" in kwargs:
            postprocess_kwargs["crops_nms_thresh"] = kwargs["crops_nms_thresh"]
        if "output_rle_mask" in kwargs:
            postprocess_kwargs["output_rle_mask"] = kwargs["output_rle_mask"]
        if "output_bboxes_mask" in kwargs:
            postprocess_kwargs["output_bboxes_mask"] = kwargs["output_bboxes_mask"]
        return preprocess_kwargs, forward_params, postprocess_kwargs
    def __call__(self, image, *args, num_workers=None, batch_size=None, **kwargs):
        return super().__call__(image, *args, num_workers=num_workers, batch_size=batch_size, **kwargs)
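    # Usage sketch (model id is just an example):
    #   generator = pipeline("mask-generation", model="facebook/sam-vit-base")
    #   outputs = generator("path/to/image.png", points_per_batch=64)
    #   masks = outputs["masks"]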
    def preprocess(
        self,
        image,
        points_per_batch=64,
        crops_n_layers: int = 0,
        crop_overlap_ratio: float = 512 / 1500,
        points_per_crop: Optional[int] = 32,
        crop_n_points_downscale_factor: Optional[int] = 1,
    ):
        image = load_image(image)
        target_size = self.image_processor.size["longest_edge"]
        crop_boxes, grid_points, cropped_images, input_labels = self.image_processor.generate_crop_boxes(
            image, target_size, crops_n_layers, crop_overlap_ratio, points_per_crop, crop_n_points_downscale_factor
        )
        model_inputs = self.image_processor(images=cropped_images, return_tensors="pt")

        with self.device_placement():
            if self.framework == "pt":
                inference_context = self.get_inference_context()
                with inference_context():
                    model_inputs = self._ensure_tensor_on_device(model_inputs, device=self.device)
                    image_embeddings = self.model.get_image_embeddings(model_inputs.pop("pixel_values"))
                    model_inputs["image_embeddings"] = image_embeddings

        n_points = grid_points.shape[1]
        points_per_batch = points_per_batch if points_per_batch is not None else n_points
        if points_per_batch <= 0:
            raise ValueError(
                "Cannot have points_per_batch<=0. Must be >=1 to returned batched outputs. "
                "To return all points at once, set points_per_batch to None"
            )
        for i in range(0, n_points, points_per_batch):
            batched_points = grid_points[:, i : i + points_per_batch, :, :]
            labels = input_labels[:, i : i + points_per_batch]
            is_last = i == n_points - points_per_batch
            yield {
                "input_points": batched_points,
                "input_labels": labels,
                "input_boxes": crop_boxes,
                "is_last": is_last,
                **model_inputs,
            }
    def _forward(
        self,
        model_inputs,
        pred_iou_thresh=0.88,
        stability_score_thresh=0.95,
        mask_threshold=0,
        stability_score_offset=1,
    ):
        input_boxes = model_inputs.pop("input_boxes")
        is_last = model_inputs.pop("is_last")
        original_sizes = model_inputs.pop("original_sizes").tolist()
        reshaped_input_sizes = model_inputs.pop("reshaped_input_sizes").tolist()

        model_outputs = self.model(**model_inputs)

        # post processing happens here in order to avoid CPU GPU copies of ALL the masks
        low_resolution_masks = model_outputs["pred_masks"]
        masks = self.image_processor.post_process_masks(
            low_resolution_masks, original_sizes, reshaped_input_sizes, mask_threshold, binarize=False
        )
        iou_scores = model_outputs["iou_scores"]
        masks, iou_scores, boxes = self.image_processor.filter_masks(
            masks[0],
            iou_scores[0],
            original_sizes[0],
            input_boxes[0],
            pred_iou_thresh,
            stability_score_thresh,
            mask_threshold,
            stability_score_offset,
        )
        return {
            "masks": masks,
            "is_last": is_last,
            "boxes": boxes,
            "iou_scores": iou_scores,
        }
    def postprocess(
        self,
        model_outputs,
        output_rle_mask=False,
        output_bboxes_mask=False,
        crops_nms_thresh=0.7,
    ):
        all_scores = []
        all_masks = []
        all_boxes = []
        for model_output in model_outputs:
            all_scores.append(model_output.pop("iou_scores"))
            all_masks.extend(model_output.pop("masks"))
            all_boxes.append(model_output.pop("boxes"))
        all_scores = torch.cat(all_scores)
        all_boxes = torch.cat(all_boxes)
        output_masks, iou_scores, rle_mask, bounding_boxes = self.image_processor.post_process_for_mask_generation(
            all_masks, all_scores, all_boxes, crops_nms_thresh
        )

        extra = defaultdict(list)
        for output in model_outputs:
            for k, v in output.items():
                extra[k].append(v)

        optional = {}
        if output_rle_mask:
            optional["rle_mask"] = rle_mask
        if output_bboxes_mask:
            optional["bounding_boxes"] = bounding_boxes
        return {"masks": output_masks, "scores": iou_scores, **optional, **extra}
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
    "examples": (re.compile(r'^check_min_version\("[^"]+"\)\s*$', re.MULTILINE), 'check_min_version("VERSION")\n'),
    "init": (re.compile(r'^__version__\s+=\s+"([^"]+)"\s*$', re.MULTILINE), '__version__ = "VERSION"\n'),
    "setup": (re.compile(r'^(\s*)version\s*=\s*"[^"]+",', re.MULTILINE), r'\1version="VERSION",'),
    "doc": (re.compile(r'^(\s*)release\s*=\s*"[^"]+"$', re.MULTILINE), 'release = "VERSION"\n'),
}
REPLACE_FILES = {
    "init": "src/diffusers/__init__.py",
    "setup": "setup.py",
}
README_FILE = "README.md"
def update_version_in_file(fname, version, pattern):
    """Update the version of one file, using a pattern from REPLACE_PATTERNS."""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)
def update_version_in_examples(version):
    """Update the version in all examples files."""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")
def global_version_update(version, patch=False):
    """Update the version in all needed files."""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)
def clean_main_ref_in_model_list():
    """Replace the links from main doc to stable doc in the model list of the README."""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1
    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1
    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)
def get_version():
    """Read the current version in the __init__."""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)
def pre_release_work(patch=False):
    """Do all the necessary pre-release steps."""
    # First let's get the default version: base version if we are in dev, bump minor otherwise.
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version
    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)
def post_release_work():
    """Do all the necessary post-release steps."""
    # First let's get the current version
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version
    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()
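# Version flow (illustrative): running pre-release on "1.2.0.dev0" proposes "1.2.0" as the
# release version; running post-release afterwards proposes "1.3.0.dev0" as the next dev version.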
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
"""simple docstring"""
import unittest
from transformers import MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING, is_vision_available
from transformers.pipelines import pipeline
from transformers.testing_utils import (
is_pipeline_test,
nested_simplify,
require_tf,
require_torch,
require_vision,
slow,
)
from .test_pipelines_common import ANY
if is_vision_available():
from PIL import Image
else:
    class Image:
        @staticmethod
        def open(*args, **kwargs):
            pass
@is_pipeline_test
@require_torch
@require_vision
class a_ ( unittest.TestCase ):
    model_mapping = MODEL_FOR_VISUAL_QUESTION_ANSWERING_MAPPING
    def get_test_pipeline(self, model, tokenizer, processor):
        vqa_pipeline = pipeline("visual-question-answering", model="hf-internal-testing/tiny-vilt-random-vqa")
        examples = [
            {
                "image": Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png"),
                "question": "How many cats are there?",
            },
            {
                "image": "./tests/fixtures/tests_samples/COCO/000000039769.png",
                "question": "How many cats are there?",
            },
        ]
        return vqa_pipeline, examples
    def run_pipeline_test(self, vqa_pipeline, examples):
        outputs = vqa_pipeline(examples, top_k=1)
        self.assertEqual(
            outputs,
            [
                [{"score": ANY(float), "answer": ANY(str)}],
                [{"score": ANY(float), "answer": ANY(str)}],
            ],
        )
@require_torch
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = pipeline("""visual-question-answering""" , model="""hf-internal-testing/tiny-vilt-random-vqa""" )
_UpperCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_UpperCAmelCase = """How many cats are there?"""
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question="""How many cats are there?""" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}, {"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}] )
_UpperCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
__UpperCamelCase , [{"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}, {"""score""": ANY(__UpperCamelCase ), """answer""": ANY(__UpperCamelCase )}] )
@slow
@require_torch
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase = pipeline("""visual-question-answering""" , model="""dandelin/vilt-b32-finetuned-vqa""" )
_UpperCAmelCase = """./tests/fixtures/tests_samples/COCO/000000039769.png"""
_UpperCAmelCase = """How many cats are there?"""
_UpperCAmelCase = vqa_pipeline(image=__UpperCamelCase , question=__UpperCamelCase , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}] )
_UpperCAmelCase = vqa_pipeline({"""image""": image, """question""": question} , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}] )
_UpperCAmelCase = vqa_pipeline(
[{"""image""": image, """question""": question}, {"""image""": image, """question""": question}] , top_k=2 )
self.assertEqual(
nested_simplify(__UpperCamelCase , decimals=4 ) , [[{"""score""": 0.8_7_9_9, """answer""": """2"""}, {"""score""": 0.2_9_6, """answer""": """1"""}]] * 2 , )
@require_tf
@unittest.skip("""Visual question answering not implemented in TF""" )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
pass | 707 |
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A ) -> None:
"""simple docstring"""
create_state_space_tree(_A , [] , 0 , [0 for i in range(len(_A ) )] )
def _UpperCamelCase ( _A , _A , _A , _A , ) -> None:
"""simple docstring"""
if index == len(_A ):
print(_A )
return
for i in range(len(_A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_UpperCAmelCase = True
create_state_space_tree(_A , _A , index + 1 , _A )
current_sequence.pop()
_UpperCAmelCase = False
a : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a) | 19 | 0 |
"""simple docstring"""
import os
import zipfile
import pytest
from datasets.utils.extract import (
BzipaExtractor,
Extractor,
GzipExtractor,
LzaExtractor,
SevenZipExtractor,
TarExtractor,
XzExtractor,
ZipExtractor,
ZstdExtractor,
)
from .utils import require_lza, require_pyazr, require_zstandard
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def _UpperCamelCase ( _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = {
"""7z""": (seven_zip_file, SevenZipExtractor),
"""bz2""": (bza_file, BzipaExtractor),
"""gzip""": (gz_file, GzipExtractor),
"""lz4""": (lza_file, LzaExtractor),
"""tar""": (tar_file, TarExtractor),
"""xz""": (xz_file, XzExtractor),
"""zip""": (zip_file, ZipExtractor),
"""zstd""": (zstd_file, ZstdExtractor),
}
_UpperCAmelCase ,_UpperCAmelCase = input_paths_and_base_extractors[compression_format]
if input_path is None:
_UpperCAmelCase = F"""for \'{compression_format}\' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_A )
assert base_extractor.is_extractable(_A )
_UpperCAmelCase = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
base_extractor.extract(_A , _A )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_UpperCAmelCase = file_path.read_text(encoding="""utf-8""" )
else:
_UpperCAmelCase = output_path.read_text(encoding="""utf-8""" )
_UpperCAmelCase = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.mark.parametrize(
"""compression_format, is_archive""" , [
("""7z""", True),
("""bz2""", False),
("""gzip""", False),
("""lz4""", False),
("""tar""", True),
("""xz""", False),
("""zip""", True),
("""zstd""", False),
] , )
def _UpperCamelCase ( _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = {
"""7z""": seven_zip_file,
"""bz2""": bza_file,
"""gzip""": gz_file,
"""lz4""": lza_file,
"""tar""": tar_file,
"""xz""": xz_file,
"""zip""": zip_file,
"""zstd""": zstd_file,
}
_UpperCAmelCase = input_paths[compression_format]
if input_path is None:
_UpperCAmelCase = F"""for \'{compression_format}\' compression_format, """
if compression_format == "7z":
reason += require_pyazr.kwargs["reason"]
elif compression_format == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_format == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(_A )
_UpperCAmelCase = Extractor.infer_extractor_format(_A )
assert extractor_format is not None
_UpperCAmelCase = tmp_path / ("""extracted""" if is_archive else """extracted.txt""")
Extractor.extract(_A , _A , _A )
if is_archive:
assert output_path.is_dir()
for file_path in output_path.iterdir():
assert file_path.name == text_file.name
_UpperCAmelCase = file_path.read_text(encoding="""utf-8""" )
else:
_UpperCAmelCase = output_path.read_text(encoding="""utf-8""" )
_UpperCAmelCase = text_file.read_text(encoding="""utf-8""" )
assert extracted_file_content == expected_file_content
@pytest.fixture
def _UpperCamelCase ( _A , _A ) -> Tuple:
"""simple docstring"""
import tarfile
_UpperCAmelCase = tmp_path / """data_dot_dot"""
directory.mkdir()
_UpperCAmelCase = directory / """tar_file_with_dot_dot.tar"""
with tarfile.TarFile(_A , """w""" ) as f:
f.add(_A , arcname=os.path.join("""..""" , text_file.name ) )
return path
@pytest.fixture
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
import tarfile
_UpperCAmelCase = tmp_path / """data_sym_link"""
directory.mkdir()
_UpperCAmelCase = directory / """tar_file_with_sym_link.tar"""
os.symlink("""..""" , directory / """subdir""" , target_is_directory=_A )
with tarfile.TarFile(_A , """w""" ) as f:
f.add(str(directory / """subdir""" ) , arcname="""subdir""" ) # str required by os.readlink on Windows and Python < 3.8
return path
@pytest.mark.parametrize(
"""insecure_tar_file, error_log""" , [("""tar_file_with_dot_dot""", """illegal path"""), ("""tar_file_with_sym_link""", """Symlink""")] , )
def _UpperCamelCase ( _A , _A , _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = {
"""tar_file_with_dot_dot""": tar_file_with_dot_dot,
"""tar_file_with_sym_link""": tar_file_with_sym_link,
}
_UpperCAmelCase = insecure_tar_files[insecure_tar_file]
_UpperCAmelCase = tmp_path / """extracted"""
TarExtractor.extract(_A , _A )
assert caplog.text
for record in caplog.records:
assert record.levelname == "ERROR"
assert error_log in record.msg
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = tmpdir / """not_a_zip_file"""
# From: https://github.com/python/cpython/pull/5053
_UpperCAmelCase = (
b"""\x89PNG\r\n\x1a\n\x00\x00\x00\rIHDR\x00\x00\x00\x01\x00\x00"""
b"""\x00\x02\x08\x06\x00\x00\x00\x99\x81\xb6'\x00\x00\x00\x15I"""
b"""DATx\x01\x01\n\x00\xf5\xff\x00PK\x05\x06\x00PK\x06\x06\x07"""
b"""\xac\x01N\xc6|a\r\x00\x00\x00\x00IEND\xaeB`\x82"""
)
with not_a_zip_file.open("""wb""" ) as f:
f.write(_A )
assert zipfile.is_zipfile(str(_A ) ) # is a false positive for `zipfile`
assert not ZipExtractor.is_extractable(_A ) # but we're right | 708 |
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Any=4 , __UpperCamelCase : Optional[int]=[0, 1, 2, 3] , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : int=[1, 3_84, 24, 24] , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=None , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = backbone_out_indices
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = scope
_UpperCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : int = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : str = False
a : List[str] = False
a : Dict = False
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = DPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = False
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
_UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = """add"""
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
_UpperCAmelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCamelCase , atol=1e-4 ) ) | 19 | 0 |
"""simple docstring"""
import os
import unittest
from transformers import FunnelTokenizer, FunnelTokenizerFast
from transformers.models.funnel.tokenization_funnel import VOCAB_FILES_NAMES
from transformers.testing_utils import require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_tokenizers
class a_ ( _A , unittest.TestCase ):
a : str = FunnelTokenizer
a : List[Any] = FunnelTokenizerFast
a : List[Any] = True
a : Optional[int] = True
def _snake_case ( self : List[Any] ) ->str:
'''simple docstring'''
super().setUp()
_UpperCAmelCase = [
"""<unk>""",
"""<cls>""",
"""<sep>""",
"""want""",
"""##want""",
"""##ed""",
"""wa""",
"""un""",
"""runn""",
"""##ing""",
""",""",
"""low""",
"""lowest""",
]
_UpperCAmelCase = os.path.join(self.tmpdirname , VOCAB_FILES_NAMES["""vocab_file"""] )
with open(self.vocab_file , """w""" , encoding="""utf-8""" ) as vocab_writer:
vocab_writer.write("""""".join([x + """\n""" for x in vocab_tokens] ) )
def _snake_case ( self : Dict , **__UpperCamelCase : Tuple ) ->Union[str, Any]:
'''simple docstring'''
return FunnelTokenizer.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _snake_case ( self : str , **__UpperCamelCase : str ) ->List[str]:
'''simple docstring'''
return FunnelTokenizerFast.from_pretrained(self.tmpdirname , **UpperCamelCase__ )
def _snake_case ( self : List[str] , __UpperCamelCase : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = """UNwant\u00E9d,running"""
_UpperCAmelCase = """unwanted, running"""
return input_text, output_text
def _snake_case ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.tokenizer_class(self.vocab_file )
_UpperCAmelCase = tokenizer.tokenize("""UNwant\u00E9d,running""" )
self.assertListEqual(UpperCamelCase__ , ["""un""", """##want""", """##ed""", """,""", """runn""", """##ing"""] )
self.assertListEqual(tokenizer.convert_tokens_to_ids(UpperCamelCase__ ) , [7, 4, 5, 10, 8, 9] )
def _snake_case ( self : str ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.get_tokenizers(do_lower_case=UpperCamelCase__ )
for tokenizer in tokenizers:
_UpperCAmelCase = tokenizer("""UNwant\u00E9d,running""" )
_UpperCAmelCase = len(inputs["""input_ids"""] ) - 1
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len )
_UpperCAmelCase = tokenizer("""UNwant\u00E9d,running""" , """UNwant\u00E9d,running""" )
self.assertListEqual(inputs["""token_type_ids"""] , [2] + [0] * sentence_len + [1] * sentence_len ) | 709 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : List[str] = logging.get_logger(__name__)
class a_ ( enum.Enum ):
a : Optional[Any] = 0
a : Dict = 1
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'generated'
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) ->Any:
'''simple docstring'''
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Any , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
if truncation is not None:
_UpperCAmelCase = truncation
_UpperCAmelCase = generate_kwargs
_UpperCAmelCase = {}
if return_tensors is not None and return_type is None:
_UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCAmelCase = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
_UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
return True
def _snake_case ( self : Optional[Any] , *__UpperCamelCase : Any , __UpperCamelCase : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
_UpperCAmelCase = ([prefix + arg for arg in args[0]],)
_UpperCAmelCase = True
elif isinstance(args[0] , __UpperCamelCase ):
_UpperCAmelCase = (prefix + args[0],)
_UpperCAmelCase = False
else:
raise ValueError(
f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""" )
_UpperCAmelCase = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
# This is produced by tokenizers but is an invalid generate kwargs
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : str=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCamelCase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def _snake_case ( self : str , __UpperCamelCase : Dict , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
if self.framework == "pt":
_UpperCAmelCase ,_UpperCAmelCase = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
_UpperCAmelCase ,_UpperCAmelCase = tf.shape(model_inputs["""input_ids"""] ).numpy()
_UpperCAmelCase = generate_kwargs.get("""min_length""" , self.model.config.min_length )
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
_UpperCAmelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCAmelCase = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_UpperCAmelCase = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=ReturnType.TEXT , __UpperCamelCase : int=False ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCAmelCase = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_UpperCAmelCase = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'summary'
def __call__( self : Optional[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : Optional[int] = 'translation'
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def _snake_case ( self : Tuple , *__UpperCamelCase : List[str] , __UpperCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=None ) ->Tuple:
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : int=None , __UpperCamelCase : int=None , **__UpperCamelCase : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
_UpperCAmelCase = src_lang
if tgt_lang is not None:
_UpperCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
# Backward compatibility, direct arguments use is preferred.
_UpperCAmelCase = kwargs.get("""task""" , self.task )
_UpperCAmelCase = task.split("""_""" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
_UpperCAmelCase = items[1]
_UpperCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->int:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase ) | 19 | 0 |
"""simple docstring"""
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = ''
for word_or_phrase in separated:
if not isinstance(_A , _A ):
raise Exception("""join() accepts only strings to be joined""" )
joined += word_or_phrase + separator
return joined.strip(_A )
if __name__ == "__main__":
from doctest import testmod
testmod() | 710 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
return CLIPTextModel(__UpperCamelCase )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
_UpperCAmelCase = LDMPipeline(unet=__UpperCamelCase , vqvae=__UpperCamelCase , scheduler=__UpperCamelCase )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" ).images
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=2 , output_type="""numpy""" , return_dict=__UpperCamelCase )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
ldm.to(__UpperCamelCase )
ldm.set_progress_bar_config(disable=__UpperCamelCase )
_UpperCAmelCase = torch.manual_seed(0 )
_UpperCAmelCase = ldm(generator=__UpperCamelCase , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 19 | 0 |
"""simple docstring"""
from __future__ import annotations
import math
def _UpperCamelCase ( _A ) -> bool:
if 1 < number < 4:
# 2 and 3 are primes
return True
elif number < 2 or number % 2 == 0 or number % 3 == 0:
# Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
return False
# All primes number are in format of 6k +/- 1
for i in range(5 , int(math.sqrt(__snake_case ) + 1 ) , 6 ):
if number % i == 0 or number % (i + 2) == 0:
return False
return True
a : Union[str, Any] = [num for num in range(3, 1_0_0_0_0_1, 2) if not is_prime(num)]
def _UpperCamelCase ( _A ) -> list[int]:
if not isinstance(__snake_case , __snake_case ):
raise ValueError("""n must be an integer""" )
if n <= 0:
raise ValueError("""n must be >= 0""" )
_UpperCAmelCase = []
for num in range(len(__snake_case ) ):
_UpperCAmelCase = 0
while 2 * i * i <= odd_composites[num]:
_UpperCAmelCase = odd_composites[num] - 2 * i * i
if is_prime(__snake_case ):
break
i += 1
else:
list_nums.append(odd_composites[num] )
if len(__snake_case ) == n:
return list_nums
return []
def _UpperCamelCase ( ) -> int:
return compute_nums(1 )[0]
if __name__ == "__main__":
print(F"{solution() = }") | 711 |
"""simple docstring"""
import re
from filelock import FileLock
try:
import nltk
a : str = True
except (ImportError, ModuleNotFoundError):
a : List[str] = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
re.sub("""<n>""" , """""" , _A ) # remove pegasus newline char
assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
return "\n".join(nltk.sent_tokenize(_A ) ) | 19 | 0 |
"""simple docstring"""
def _UpperCamelCase ( _A = 1_0**9 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = 1
_UpperCAmelCase = 2
_UpperCAmelCase = 0
_UpperCAmelCase = 0
_UpperCAmelCase = 0
while perimeter <= max_perimeter:
perimeters_sum += perimeter
prev_value += 2 * value
value += prev_value
_UpperCAmelCase = 2 * value + 2 if i % 2 == 0 else 2 * value - 2
i += 1
return perimeters_sum
if __name__ == "__main__":
print(F"{solution() = }") | 712 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
a : str = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class a_ :
a : List[Any] = PegasusConfig
a : Dict = {}
a : List[Any] = 'gelu'
def __init__( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Tuple=13 , __UpperCamelCase : Tuple=7 , __UpperCamelCase : Tuple=True , __UpperCamelCase : Any=False , __UpperCamelCase : Any=99 , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : List[str]=4 , __UpperCamelCase : Dict=37 , __UpperCamelCase : int=0.1 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Optional[Any]=20 , __UpperCamelCase : Tuple=2 , __UpperCamelCase : Optional[int]=1 , __UpperCamelCase : Tuple=0 , ) ->int:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
def _snake_case ( self : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size ).clip(3 , self.vocab_size )
_UpperCAmelCase = np.expand_dims(np.array([self.eos_token_id] * self.batch_size ) , 1 )
_UpperCAmelCase = np.concatenate([input_ids, eos_tensor] , axis=1 )
_UpperCAmelCase = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
_UpperCAmelCase = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
_UpperCAmelCase = prepare_pegasus_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, inputs_dict
def _snake_case ( self : Tuple , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : List[Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.ones((decoder_input_ids.shape[0], max_decoder_length) , dtype="""i4""" )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _snake_case ( self : Any , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = 20
_UpperCAmelCase = model_class_name(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] )
_UpperCAmelCase ,_UpperCAmelCase = (
inputs_dict["""decoder_input_ids"""],
inputs_dict["""decoder_attention_mask"""],
)
_UpperCAmelCase = jnp.concatenate(
[
decoder_attention_mask,
jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1]) ),
] , axis=-1 , )
_UpperCAmelCase = model.init_cache(decoder_input_ids.shape[0] , __UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = jnp.broadcast_to(
jnp.arange(decoder_input_ids.shape[-1] - 1 )[None, :] , (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1) , )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, :-1] , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase , past_key_values=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]] , dtype="""i4""" )
_UpperCAmelCase = model.decode(
decoder_input_ids[:, -1:] , __UpperCamelCase , past_key_values=outputs_cache.past_key_values , decoder_attention_mask=__UpperCamelCase , decoder_position_ids=__UpperCamelCase , )
_UpperCAmelCase = model.decode(__UpperCamelCase , __UpperCamelCase , decoder_attention_mask=__UpperCamelCase )
_UpperCAmelCase = np.max(np.abs((outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]) ) )
self.parent.assertTrue(diff < 1e-3 , msg=f"""Max diff is {diff}""" )
def _UpperCamelCase ( _A , _A , _A , _A=None , _A=None , ) -> int:
"""simple docstring"""
if attention_mask is None:
_UpperCAmelCase = np.not_equal(_A , config.pad_token_id ).astype(np.inta )
if decoder_attention_mask is None:
_UpperCAmelCase = np.concatenate(
[
np.ones(decoder_input_ids[:, :1].shape , dtype=np.inta ),
np.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ).astype(np.inta ),
] , axis=-1 , )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
}
@require_flax
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(
FlaxPegasusForConditionalGeneration,
FlaxPegasusModel,
)
if is_flax_available()
else ()
)
a : Any = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
a : Any = True
a : int = False
a : Union[str, Any] = False
a : Optional[int] = False
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : List[Any] , __UpperCamelCase : str=None , **__UpperCamelCase : int ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=5_12 , padding=__UpperCamelCase )
_UpperCAmelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
assert tgt_text == decoded | 19 | 0 |
"""simple docstring"""
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not head:
return True
# split the list to two parts
_UpperCAmelCase ,_UpperCAmelCase = head.next, head
while fast and fast.next:
_UpperCAmelCase = fast.next.next
_UpperCAmelCase = slow.next
_UpperCAmelCase = slow.next
_UpperCAmelCase = None # Don't forget here! But forget still works!
# reverse the second part
_UpperCAmelCase = None
while second:
_UpperCAmelCase = second.next
_UpperCAmelCase = node
_UpperCAmelCase = second
_UpperCAmelCase = nxt
# compare two parts
# second part has the same or one less node
while node:
if node.val != head.val:
return False
_UpperCAmelCase = node.next
_UpperCAmelCase = head.next
return True
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
if not head or not head.next:
return True
# 1. Get the midpoint (slow)
_UpperCAmelCase = _UpperCAmelCase = _UpperCAmelCase = head
while fast and fast.next:
_UpperCAmelCase ,_UpperCAmelCase = fast.next.next, slow.next
# 2. Push the second half into the stack
_UpperCAmelCase = [slow.val]
while slow.next:
_UpperCAmelCase = slow.next
stack.append(slow.val )
# 3. Comparison
while stack:
if stack.pop() != cur.val:
return False
_UpperCAmelCase = cur.next
return True
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
if not head or not head.next:
return True
_UpperCAmelCase = {}
_UpperCAmelCase = 0
while head:
if head.val in d:
d[head.val].append(A__ )
else:
_UpperCAmelCase = [pos]
_UpperCAmelCase = head.next
pos += 1
_UpperCAmelCase = pos - 1
_UpperCAmelCase = 0
for v in d.values():
if len(A__ ) % 2 != 0:
middle += 1
else:
_UpperCAmelCase = 0
for i in range(0 , len(A__ ) ):
if v[i] + v[len(A__ ) - 1 - step] != checksum:
return False
step += 1
if middle > 1:
return False
return True | 713 |
"""simple docstring"""
import tempfile
import unittest
from transformers import T5Config, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import AutoTokenizer, UMT5ForConditionalGeneration, UMT5ForQuestionAnswering, UMT5Model
class UMT5ModelTester:
    """Builds tiny UMT5 configs and model inputs, and runs the shape checks shared by the suite."""
    def __init__(self, parent, vocab_size=99, batch_size=13, encoder_seq_length=7, decoder_seq_length=9, is_training=True, use_attention_mask=True, use_labels=False, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, d_ff=37, relative_attention_num_buckets=8, dropout_rate=0.1, initializer_factor=0.002, eos_token_id=1, pad_token_id=0, decoder_start_token_id=0, scope=None, decoder_layers=None):
        self.parent = parent
        self.batch_size = batch_size
        self.encoder_seq_length = encoder_seq_length
        self.decoder_seq_length = decoder_seq_length
        # For common tests
        self.seq_length = self.decoder_seq_length
        self.is_training = is_training
        self.use_attention_mask = use_attention_mask
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.d_ff = d_ff
        self.relative_attention_num_buckets = relative_attention_num_buckets
        self.dropout_rate = dropout_rate
        self.initializer_factor = initializer_factor
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.decoder_start_token_id = decoder_start_token_id
        self.scope = None  # accepted for tester-API parity but unused
        self.decoder_layers = decoder_layers
    def get_large_model_config(self):
        return T5Config.from_pretrained("google/umt5-base")
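    # Note: loading "google/umt5-base" pulls in a full public checkpoint; the create_and_check_*
    # helpers below instead run on the tiny randomly initialized config from get_config().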
    def prepare_inputs_dict(self, config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None, head_mask=None, decoder_head_mask=None, cross_attn_head_mask=None):
        # Fill in defaults: pad positions are masked out, and head masks default to all-ones.
        if attention_mask is None:
            attention_mask = input_ids.ne(config.pad_token_id)
        if decoder_attention_mask is None:
            decoder_attention_mask = decoder_input_ids.ne(config.pad_token_id)
        if head_mask is None:
            head_mask = torch.ones(config.num_hidden_layers, config.num_attention_heads, device=torch_device)
        if decoder_head_mask is None:
            decoder_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        if cross_attn_head_mask is None:
            cross_attn_head_mask = torch.ones(config.num_decoder_layers, config.num_attention_heads, device=torch_device)
        return {
            "input_ids": input_ids,
            "decoder_input_ids": decoder_input_ids,
            "attention_mask": attention_mask,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
        }
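    # Illustrative shapes (not asserted anywhere): with batch_size=13, encoder_seq_length=7,
    # num_hidden_layers=5 and num_attention_heads=4, `attention_mask` is a (13, 7) boolean
    # tensor and `head_mask` a (5, 4) tensor of ones, i.e. no attention head is masked out
    # unless the caller passes an explicit mask.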
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
# pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
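# Illustration (pad_token_id = 0 assumed): clamp(self.pad_token_id + 1) lifts
# every id below 1 up to 1, e.g. tensor([[0, 5, 0, 3]]) -> tensor([[1, 5, 1, 3]]),
# so no pad ids survive inside the sequences.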
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_UpperCAmelCase ,_UpperCAmelCase = outputs.to_tuple()
# create hypothetical next token and extend to next_input_ids
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append the new token to input_ids to form next_input_ids
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = model(__UpperCamelCase )["""last_hidden_state"""]
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_UpperCAmelCase = model(**__UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a : Any = True
a : Optional[int] = False
a : Any = False
a : Optional[int] = True
a : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a : int = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Can't do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(__UpperCamelCase , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_UpperCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
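# Rationale, sketched: a zero head mask disables every attention head in the
# masked block, so the attention weights gathered from generate() must sum to
# exactly 0.0 for that block.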
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_UpperCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
[3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55, 2_74, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33, 6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96, 2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids.to(__UpperCamelCase ) )
_UpperCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
self.assertEqual(__UpperCamelCase , __UpperCamelCase )
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Any = logging.get_logger(__name__)
a : Any = {
'''vocab_file''': '''vocab.json''',
'''merges_file''': '''merges.txt''',
'''tokenizer_config_file''': '''tokenizer_config.json''',
}
a : Any = {
'''vocab_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/vocab.json'''
},
'''merges_file''': {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/merges.txt'''
},
'''tokenizer_config_file''': {
'''facebook/blenderbot_small-90M''': (
'''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/tokenizer_config.json'''
)
},
}
a : List[Any] = {'''facebook/blenderbot_small-90M''': 5_1_2}
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
_UpperCAmelCase = set(lowerCamelCase__ )
return pairs
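# Intended behavior, sketched: for the word tuple ("h", "e", "l", "l", "o") the
# function returns the adjacent symbol bigrams
# {("h", "e"), ("e", "l"), ("l", "l"), ("l", "o")}.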
class a_ ( _UpperCamelCase ):
a : Optional[Any] = VOCAB_FILES_NAMES
a : Any = PRETRAINED_VOCAB_FILES_MAP
a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Any = ['input_ids', 'attention_mask']
def __init__( self : int , __UpperCamelCase : Tuple , __UpperCamelCase : List[str] , __UpperCamelCase : List[str]="__start__" , __UpperCamelCase : Any="__end__" , __UpperCamelCase : Dict="__unk__" , __UpperCamelCase : Union[str, Any]="__null__" , **__UpperCamelCase : str , ) ->str:
'''simple docstring'''
super().__init__(unk_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , pad_token=__UpperCamelCase , **__UpperCamelCase )
with open(__UpperCamelCase , encoding="""utf-8""" ) as vocab_handle:
_UpperCAmelCase = json.load(__UpperCamelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
with open(__UpperCamelCase , encoding="""utf-8""" ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split("""\n""" )[1:-1]
_UpperCAmelCase = [tuple(merge.split() ) for merge in merges]
_UpperCAmelCase = dict(zip(__UpperCamelCase , range(len(__UpperCamelCase ) ) ) )
_UpperCAmelCase = {}
@property
def _snake_case ( self : Optional[Any] ) ->Any:
'''simple docstring'''
return len(self.encoder )
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : str ) ->int:
'''simple docstring'''
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = re.sub("""([.,!?()])""" , r""" \1""" , __UpperCamelCase )
_UpperCAmelCase = re.sub("""(')""" , r""" \1 """ , __UpperCamelCase )
_UpperCAmelCase = re.sub(r"""\s{2,}""" , """ """ , __UpperCamelCase )
if "\n" in token:
_UpperCAmelCase = token.replace("""\n""" , """ __newln__""" )
_UpperCAmelCase = token.split(""" """ )
_UpperCAmelCase = []
for token in tokens:
if not len(__UpperCamelCase ):
continue
_UpperCAmelCase = token.lower()
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = tuple(list(word[:-1] ) + [word[-1] + """</w>"""] )
_UpperCAmelCase = get_pairs(__UpperCamelCase )
if not pairs:
words.append(__UpperCamelCase )
continue
while True:
_UpperCAmelCase = min(__UpperCamelCase , key=lambda __UpperCamelCase : self.bpe_ranks.get(__UpperCamelCase , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(__UpperCamelCase ):
try:
_UpperCAmelCase = word.index(__UpperCamelCase , __UpperCamelCase )
new_word.extend(word[i:j] )
_UpperCAmelCase = j
except ValueError:
new_word.extend(word[i:] )
break
if word[i] == first and i < len(__UpperCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(__UpperCamelCase )
_UpperCAmelCase = new_word
if len(__UpperCamelCase ) == 1:
break
else:
_UpperCAmelCase = get_pairs(__UpperCamelCase )
_UpperCAmelCase = "@@ ".join(__UpperCamelCase )
_UpperCAmelCase = word[:-4]
_UpperCAmelCase = word
words.append(__UpperCamelCase )
return " ".join(__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : str ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = re.findall(r"""\S+\n?""" , __UpperCamelCase )
for token in words:
split_tokens.extend(list(self.bpe(__UpperCamelCase ).split(""" """ ) ) )
return split_tokens
def _snake_case ( self : List[Any] , __UpperCamelCase : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = token.lower()
return self.encoder.get(__UpperCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : int ) ->str:
'''simple docstring'''
return self.decoder.get(__UpperCamelCase , self.unk_token )
def _snake_case ( self : int , __UpperCamelCase : List[str] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = " ".join(__UpperCamelCase ).replace("""@@ """ , """""" ).strip()
return out_string
def _snake_case ( self : Any , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) ->int:
'''simple docstring'''
if not os.path.isdir(__UpperCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__UpperCamelCase , ensure_ascii=__UpperCamelCase ) + """\n""" )
_UpperCAmelCase = 0
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
writer.write("""#version: 0.2\n""" )
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda __UpperCamelCase : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
_UpperCAmelCase = token_index
writer.write(""" """.join(__UpperCamelCase ) + """\n""" )
index += 1
return vocab_file, merge_file
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCamelCase ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
_UpperCAmelCase ,_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __UpperCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferReader(_A ) if isinstance(_A , pa.Buffer ) else pa.memory_map(_A )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
assert len(pa_table.to_batches() ) == expected_num_chunks
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
del pa_table
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
with ArrowWriter(stream=_A , features=_A ) as writer:
writer.write({"""labels""": 0} )
writer.write({"""labels""": 1} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == features.arrow_schema
assert writer._schema.metadata == features.arrow_schema.metadata
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pa.ipc.open_stream(_A )
_UpperCAmelCase = f.read_all()
_UpperCAmelCase = pa_table.schema
assert pa_table.num_rows == 2
assert schema == features.arrow_schema
assert schema.metadata == features.arrow_schema.metadata
assert features == Features.from_arrow_schema(_A )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def _UpperCamelCase ( ) -> str:
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
_UpperCAmelCase = os.path.join(_A , """test.arrow""" )
with ArrowWriter(path=_A , schema=pa.schema(_A ) ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(_A , 1 )
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
if pa.types.is_list(_A ):
return get_base_dtype(arr_type.value_type )
else:
return arr_type
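# Illustration: for a nested type such as pa.list_(pa.list_(pa.int32())) the
# recursion unwraps both list levels and returns the leaf type pa.int32().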
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
if isinstance(lst[0] , _A ):
change_first_primitive_element_in_list(lst[0] , _A )
else:
_UpperCAmelCase = value
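# Intended behavior, sketched: with lst = [[1, 2, 3]] and value = 9 the call
# recurses into the inner list and mutates it in place, leaving [[9, 2, 3]].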
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# values outside the optimized dtype's range should fall back to the default int64
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def _UpperCamelCase ( ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(stream=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_A , format="""png""" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_A , features=Features({"""image""": Image()} ) , embed_local_files=_A ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _A )
with open(_A , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = pa.schema([pa.field("""col_1""" , pa.string() , nullable=_A )] )
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(stream=_A ) as writer:
writer._build_writer(inferred_schema=_A )
assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
"""simple docstring"""
from collections.abc import Callable
class a_ :
def __init__( self : Dict , __UpperCamelCase : Callable | None = None ) ->None:
'''simple docstring'''
_UpperCAmelCase = []
# Stores indexes of each item for supporting updates and deletion.
_UpperCAmelCase = {}
# Stores current size of heap.
_UpperCAmelCase = 0
# Stores function used to evaluate the score of an item on which basis ordering
# will be done.
_UpperCAmelCase = key or (lambda x: x)
def _snake_case ( self : Optional[int] , __UpperCamelCase : int ) ->int | None:
'''simple docstring'''
return int((i - 1) / 2 ) if i > 0 else None
def _snake_case ( self : Optional[int] , __UpperCamelCase : int ) ->int | None:
'''simple docstring'''
_UpperCAmelCase = int(2 * i + 1 )
return left if 0 < left < self.size else None
def _snake_case ( self : List[str] , __UpperCamelCase : int ) ->int | None:
'''simple docstring'''
_UpperCAmelCase = int(2 * i + 2 )
return right if 0 < right < self.size else None
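# Index arithmetic sketch for the array-backed heap: with size = 6, node i = 1
# has parent int((1 - 1) / 2) = 0, left child 2 * 1 + 1 = 3 and right child
# 2 * 1 + 2 = 4; a child index falling outside the current size resolves to None.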
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int ) ->None:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = (
self.pos_map[self.arr[j][0]],
self.pos_map[self.arr[i][0]],
)
# Then swap the items in the list.
_UpperCAmelCase ,_UpperCAmelCase = self.arr[j], self.arr[i]
def _snake_case ( self : List[Any] , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
return self.arr[i][1] < self.arr[j][1]
def _snake_case ( self : Optional[Any] , __UpperCamelCase : int ) ->int:
'''simple docstring'''
_UpperCAmelCase = self._left(lowercase_ )
_UpperCAmelCase = self._right(lowercase_ )
_UpperCAmelCase = i
if left is not None and not self._cmp(lowercase_ , lowercase_ ):
_UpperCAmelCase = left
if right is not None and not self._cmp(lowercase_ , lowercase_ ):
_UpperCAmelCase = right
return valid_parent
def _snake_case ( self : Tuple , __UpperCamelCase : int ) ->None:
'''simple docstring'''
_UpperCAmelCase = self._parent(lowercase_ )
while parent is not None and not self._cmp(lowercase_ , lowercase_ ):
self._swap(lowercase_ , lowercase_ )
_UpperCAmelCase ,_UpperCAmelCase = parent, self._parent(lowercase_ )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : int ) ->None:
'''simple docstring'''
_UpperCAmelCase = self._get_valid_parent(lowercase_ )
while valid_parent != index:
self._swap(lowercase_ , lowercase_ )
_UpperCAmelCase ,_UpperCAmelCase = valid_parent, self._get_valid_parent(lowercase_ )
def _snake_case ( self : List[str] , __UpperCamelCase : int , __UpperCamelCase : int ) ->None:
'''simple docstring'''
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
_UpperCAmelCase = [item, self.key(lowercase_ )]
# Make sure heap is right in both up and down direction.
# Ideally only one of them will make any change.
self._heapify_up(lowercase_ )
self._heapify_down(lowercase_ )
def _snake_case ( self : str , __UpperCamelCase : int ) ->None:
'''simple docstring'''
if item not in self.pos_map:
return
_UpperCAmelCase = self.pos_map[item]
del self.pos_map[item]
_UpperCAmelCase = self.arr[self.size - 1]
_UpperCAmelCase = index
self.size -= 1
# Make sure heap is right in both up and down direction. Ideally only one
# of them will make any change- so no performance loss in calling both.
if self.size > index:
self._heapify_up(lowercase_ )
self._heapify_down(lowercase_ )
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->None:
'''simple docstring'''
_UpperCAmelCase = len(self.arr )
if arr_len == self.size:
self.arr.append([item, self.key(lowercase_ )] )
else:
_UpperCAmelCase = [item, self.key(lowercase_ )]
_UpperCAmelCase = self.size
self.size += 1
self._heapify_up(self.size - 1 )
def _snake_case ( self : Optional[Any] ) ->tuple | None:
'''simple docstring'''
return self.arr[0] if self.size else None
def _snake_case ( self : List[Any] ) ->tuple | None:
'''simple docstring'''
_UpperCAmelCase = self.get_top()
if top_item_tuple:
self.delete_item(top_item_tuple[0] )
return top_item_tuple
def _UpperCamelCase ( ) -> None:
"""simple docstring"""
if __name__ == "__main__":
import doctest
doctest.testmod()
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
a : List[Any] = get_logger()
a : Optional[dict] = None
class a_ ( TensorFormatter[Mapping, 'jax.Array', Mapping] ):
def __init__( self : int , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : int ) ->Tuple:
'''simple docstring'''
super().__init__(features=__UpperCamelCase )
import jax
from jaxlib.xla_client import Device
if isinstance(__UpperCamelCase , __UpperCamelCase ):
raise ValueError(
f"""Expected {device} to be a `str` not {type(__UpperCamelCase )}, as `jaxlib.xla_extension.Device` """
"""is not serializable neither with `pickle` nor with `dill`. Instead you can surround """
"""the device with `str()` to get its string identifier that will be internally mapped """
"""to the actual `jaxlib.xla_extension.Device`.""" )
_UpperCAmelCase = device if isinstance(__UpperCamelCase , __UpperCamelCase ) else str(jax.devices()[0] )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
_UpperCAmelCase = str(jax.devices()[0] )
_UpperCAmelCase = jnp_array_kwargs
@staticmethod
def _snake_case ( ) ->Dict[str, "jaxlib.xla_extension.Device"]:
'''simple docstring'''
import jax
return {str(__UpperCamelCase ): device for device in jax.devices()}
def _snake_case ( self : Dict , __UpperCamelCase : Any ) ->Union[str, Any]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , __UpperCamelCase ) and column:
if all(
isinstance(__UpperCamelCase , jax.Array ) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column ):
return jnp.stack(__UpperCamelCase , axis=0 )
return column
def _snake_case ( self : List[str] , __UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
import jax
import jax.numpy as jnp
if isinstance(__UpperCamelCase , (str, bytes, type(__UpperCamelCase )) ):
return value
elif isinstance(__UpperCamelCase , (np.character, np.ndarray) ) and np.issubdtype(value.dtype , np.character ):
return value.tolist()
_UpperCAmelCase = {}
if isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.integer ):
# the default int precision depends on the jax config
# see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
if jax.config.jax_enable_xaa:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
else:
_UpperCAmelCase = {"""dtype""": jnp.intaa}
elif isinstance(__UpperCamelCase , (np.number, np.ndarray) ) and np.issubdtype(value.dtype , np.floating ):
_UpperCAmelCase = {"""dtype""": jnp.floataa}
elif config.PIL_AVAILABLE and "PIL" in sys.modules:
import PIL.Image
if isinstance(__UpperCamelCase , PIL.Image.Image ):
_UpperCAmelCase = np.asarray(__UpperCamelCase )
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
_UpperCAmelCase = self._map_devices_to_str()
with jax.default_device(DEVICE_MAPPING[self.device] ):
# calling jnp.array on a np.ndarray does copy the data
# see https://github.com/google/jax/issues/4486
return jnp.array(__UpperCamelCase , **{**default_dtype, **self.jnp_array_kwargs} )
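# Dtype defaulting, sketched (assuming jax's standard x64 switch): with x64
# disabled, _tensorize(np.array([1, 2])) yields a 32-bit integer jnp array and
# _tensorize(np.array([1.0])) a float32 one; enabling jax_enable_x64 widens the
# integer default to 64 bits, which is what the branches above select between.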
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
import jax
# support for torch, tf, jax etc.
if config.TORCH_AVAILABLE and "torch" in sys.modules:
import torch
if isinstance(__UpperCamelCase , torch.Tensor ):
return self._tensorize(data_struct.detach().cpu().numpy()[()] )
if hasattr(__UpperCamelCase , """__array__""" ) and not isinstance(__UpperCamelCase , jax.Array ):
_UpperCAmelCase = data_struct.__array__()
# support for nested types like struct of list of struct
if isinstance(__UpperCamelCase , np.ndarray ):
if data_struct.dtype == object: # jax arrays cannot be instantiated from an array of objects
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
elif isinstance(__UpperCamelCase , (list, tuple) ):
return self._consolidate([self.recursive_tensorize(__UpperCamelCase ) for substruct in data_struct] )
return self._tensorize(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : dict ) ->int:
'''simple docstring'''
return map_nested(self._recursive_tensorize , __UpperCamelCase , map_list=__UpperCamelCase )
def _snake_case ( self : Dict , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_row(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_row(__UpperCamelCase )
return self.recursive_tensorize(__UpperCamelCase )
def _snake_case ( self : Optional[int] , __UpperCamelCase : pa.Table ) ->"jax.Array":
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_column(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_column(__UpperCamelCase , pa_table.column_names[0] )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
_UpperCAmelCase = self._consolidate(__UpperCamelCase )
return column
def _snake_case ( self : Optional[Any] , __UpperCamelCase : pa.Table ) ->Mapping:
'''simple docstring'''
_UpperCAmelCase = self.numpy_arrow_extractor().extract_batch(__UpperCamelCase )
_UpperCAmelCase = self.python_features_decoder.decode_batch(__UpperCamelCase )
_UpperCAmelCase = self.recursive_tensorize(__UpperCamelCase )
for column_name in batch:
_UpperCAmelCase = self._consolidate(batch[column_name] )
return batch
"""simple docstring"""
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : int = {
'''facebook/wav2vec2-base-960h''': '''https://huggingface.co/facebook/wav2vec2-base-960h/resolve/main/config.json''',
# See all Wav2Vec2 models at https://huggingface.co/models?filter=wav2vec2
}
class a_ ( __lowerCAmelCase ):
'''simple docstring'''
a : Dict = '''wav2vec2'''
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[int]=32 , __UpperCamelCase : Optional[Any]=7_68 , __UpperCamelCase : List[Any]=12 , __UpperCamelCase : Union[str, Any]=12 , __UpperCamelCase : Optional[int]=30_72 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Tuple=0.1 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Optional[int]=0.0 , __UpperCamelCase : Dict=0.1 , __UpperCamelCase : List[str]=0.1 , __UpperCamelCase : int=0.0_2 , __UpperCamelCase : Tuple=1e-5 , __UpperCamelCase : Optional[int]="group" , __UpperCamelCase : Optional[Any]="gelu" , __UpperCamelCase : int=(5_12, 5_12, 5_12, 5_12, 5_12, 5_12, 5_12) , __UpperCamelCase : List[str]=(5, 2, 2, 2, 2, 2, 2) , __UpperCamelCase : str=(10, 3, 3, 3, 3, 2, 2) , __UpperCamelCase : int=False , __UpperCamelCase : List[str]=1_28 , __UpperCamelCase : List[Any]=16 , __UpperCamelCase : Any=False , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : int=0.0_5 , __UpperCamelCase : Optional[Any]=10 , __UpperCamelCase : Union[str, Any]=2 , __UpperCamelCase : Optional[int]=0.0 , __UpperCamelCase : List[str]=10 , __UpperCamelCase : int=0 , __UpperCamelCase : Dict=3_20 , __UpperCamelCase : List[Any]=2 , __UpperCamelCase : Optional[Any]=0.1 , __UpperCamelCase : Union[str, Any]=1_00 , __UpperCamelCase : Tuple=2_56 , __UpperCamelCase : str=2_56 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Tuple="sum" , __UpperCamelCase : Tuple=False , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Optional[int]=2_56 , __UpperCamelCase : Optional[Any]=(5_12, 5_12, 5_12, 5_12, 15_00) , __UpperCamelCase : List[Any]=(5, 3, 3, 1, 1) , __UpperCamelCase : str=(1, 2, 3, 1, 1) , __UpperCamelCase : List[Any]=5_12 , __UpperCamelCase : str=0 , __UpperCamelCase : int=1 , __UpperCamelCase : Any=2 , __UpperCamelCase : List[Any]=False , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : str=2 , __UpperCamelCase : int=3 , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Tuple=None , **__UpperCamelCase : List[str] , ) ->Optional[Any]:
'''simple docstring'''
super().__init__(**lowerCAmelCase_ , pad_token_id=lowerCAmelCase_ , bos_token_id=lowerCAmelCase_ , eos_token_id=lowerCAmelCase_ )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = feat_extract_norm
_UpperCAmelCase = feat_extract_activation
_UpperCAmelCase = list(lowerCAmelCase_ )
_UpperCAmelCase = list(lowerCAmelCase_ )
_UpperCAmelCase = list(lowerCAmelCase_ )
_UpperCAmelCase = conv_bias
_UpperCAmelCase = num_conv_pos_embeddings
_UpperCAmelCase = num_conv_pos_embedding_groups
_UpperCAmelCase = len(self.conv_dim )
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_dropout
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = activation_dropout
_UpperCAmelCase = feat_proj_dropout
_UpperCAmelCase = final_dropout
_UpperCAmelCase = layerdrop
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
_UpperCAmelCase = vocab_size
_UpperCAmelCase = do_stable_layer_norm
_UpperCAmelCase = use_weighted_layer_sum
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
"""Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =="""
""" `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ="""
f""" {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,"""
f""" `len(config.conv_kernel) = {len(self.conv_kernel )}`.""" )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
_UpperCAmelCase = apply_spec_augment
_UpperCAmelCase = mask_time_prob
_UpperCAmelCase = mask_time_length
_UpperCAmelCase = mask_time_min_masks
_UpperCAmelCase = mask_feature_prob
_UpperCAmelCase = mask_feature_length
_UpperCAmelCase = mask_feature_min_masks
# parameters for pretraining with codevector quantized representations
_UpperCAmelCase = num_codevectors_per_group
_UpperCAmelCase = num_codevector_groups
_UpperCAmelCase = contrastive_logits_temperature
_UpperCAmelCase = feat_quantizer_dropout
_UpperCAmelCase = num_negatives
_UpperCAmelCase = codevector_dim
_UpperCAmelCase = proj_codevector_dim
_UpperCAmelCase = diversity_loss_weight
# ctc loss
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# adapter
_UpperCAmelCase = add_adapter
_UpperCAmelCase = adapter_kernel_size
_UpperCAmelCase = adapter_stride
_UpperCAmelCase = num_adapter_layers
_UpperCAmelCase = output_hidden_size or hidden_size
_UpperCAmelCase = adapter_attn_dim
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
_UpperCAmelCase = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
_UpperCAmelCase = list(lowerCAmelCase_ )
_UpperCAmelCase = list(lowerCAmelCase_ )
_UpperCAmelCase = list(lowerCAmelCase_ )
_UpperCAmelCase = xvector_output_dim
@property
def _snake_case ( self : int ) ->int:
'''simple docstring'''
return functools.reduce(operator.mul , self.conv_stride , 1 )
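# Worked example (default wav2vec2 strides assumed): conv_stride = (5, 2, 2, 2, 2, 2, 2)
# gives a product of 5 * 2**6 = 320, i.e. one feature frame per 320 input samples,
# which is 20 ms of audio at a 16 kHz sampling rate.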
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
a : Tuple = {
'''configuration_pegasus_x''': ['''PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''PegasusXConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : List[str] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
a : Dict = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from ..models.auto import AutoModelForVisionaSeq
from ..utils import requires_backends
from .base import PipelineTool
if TYPE_CHECKING:
from PIL import Image
class a_ ( A_ ):
a : List[Any] = 'Salesforce/blip-image-captioning-base'
a : Dict = (
'This is a tool that generates a description of an image. It takes an input named `image` which should be the '
'image to caption, and returns a text that contains the description in English.'
)
a : str = 'image_captioner'
a : Dict = AutoModelForVisionaSeq
a : Optional[Any] = ['image']
a : List[str] = ['text']
def __init__( self : List[str] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Union[str, Any] ) ->Optional[int]:
'''simple docstring'''
requires_backends(self , ["""vision"""] )
super().__init__(*UpperCAmelCase__ , **UpperCAmelCase__ )
def _snake_case ( self : Optional[int] , __UpperCamelCase : "Image" ) ->Any:
'''simple docstring'''
return self.pre_processor(images=UpperCAmelCase__ , return_tensors="""pt""" )
def _snake_case ( self : Any , __UpperCamelCase : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
return self.model.generate(**UpperCAmelCase__ )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
return self.pre_processor.batch_decode(UpperCAmelCase__ , skip_special_tokens=UpperCAmelCase__ )[0].strip()
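# Usage sketch (relies on the __call__ contract inherited from PipelineTool; the
# image path is hypothetical):
# tool = a_()
# caption = tool("photo.png") # encode -> forward -> decode, returns a string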
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
a : int = WebClient(token=os.environ['''CI_SLACK_BOT_TOKEN'''])
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = test_results.split(""" """ )
_UpperCAmelCase = 0
_UpperCAmelCase = 0
# When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
# When it is too long, those signs are not present.
_UpperCAmelCase = expressions[-2] if """=""" in expressions[-1] else expressions[-1]
for i, expression in enumerate(_A ):
if "failed" in expression:
failed += int(expressions[i - 1] )
if "passed" in expression:
success += int(expressions[i - 1] )
return failed, success, time_spent
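# Parsing sketch (summary-line format assumed from pytest):
# "= 30 failed, 16 passed in 2.30s =" -> failed = 30, success = 16 and
# time_spent = "2.30s", since the trailing "=" pushes the time one token back.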
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
_UpperCAmelCase = {}
_UpperCAmelCase = None
_UpperCAmelCase = False
for line in failures_short_lines.split("""\n""" ):
if re.search(R"""_ \[doctest\]""" , _A ):
_UpperCAmelCase = True
_UpperCAmelCase = line.split(""" """ )[2]
elif in_error and not line.split(""" """ )[0].isdigit():
_UpperCAmelCase = line
_UpperCAmelCase = False
return failures
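# Input sketch (hypothetical failures_short excerpt): a header such as
# "_ [doctest] transformers.models.foo.modeling_foo _" opens a block, so
# line.split(" ")[2] captures the dotted test name, and the first later line
# whose leading token is not a number is recorded as that test's failure text.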
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : str , __UpperCamelCase : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = title
_UpperCAmelCase = doc_test_results["""time_spent"""].split(""",""" )[0]
_UpperCAmelCase = doc_test_results["""success"""]
_UpperCAmelCase = doc_test_results["""failures"""]
_UpperCAmelCase = self.n_success + self.n_failures
# Failures and success of the modeling tests
_UpperCAmelCase = doc_test_results
@property
def _snake_case ( self : int ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self._time_spent]
_UpperCAmelCase = 0
for time in time_spent:
_UpperCAmelCase = time.split(""":""" )
# Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = [0, 0, time_parts[0]]
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = int(time_parts[0] ), int(time_parts[1] ), float(time_parts[2] )
total_secs += hours * 36_00 + minutes * 60 + seconds
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = total_secs // 36_00, (total_secs % 36_00) // 60, total_secs % 60
return f"""{int(__UpperCamelCase )}h{int(__UpperCamelCase )}m{int(__UpperCamelCase )}s"""
@property
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->Dict:
'''simple docstring'''
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = 40
_UpperCAmelCase = {k: v["""failed"""] for k, v in doc_test_results.items() if isinstance(__UpperCamelCase , __UpperCamelCase )}
_UpperCAmelCase = """"""
for category, failures in category_failures.items():
if len(__UpperCamelCase ) == 0:
continue
if report != "":
report += "\n\n"
report += f"""*{category} failures*:""".ljust(line_length // 2 ).rjust(line_length // 2 ) + "\n"
report += "`"
report += "`\n`".join(__UpperCamelCase )
report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = [self.header]
if self.n_failures > 0:
blocks.append(self.failures )
if self.n_failures > 0:
blocks.extend([self.category_failures] )
if self.n_failures == 0:
blocks.append(self.no_failures )
return json.dumps(__UpperCamelCase )
@staticmethod
def _snake_case ( ) ->Any:
'''simple docstring'''
_UpperCAmelCase = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(__UpperCamelCase )} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text="""There was an issue running the tests.""" , blocks=__UpperCamelCase , )
def _snake_case ( self : Tuple ) ->Optional[Any]:
'''simple docstring'''
print("""Sending the following payload""" )
print(json.dumps({"""blocks""": json.loads(self.payload )} ) )
_UpperCAmelCase = f"""{self.n_failures} failures out of {self.n_tests} tests,""" if self.n_failures else """All tests passed."""
_UpperCAmelCase = client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , blocks=self.payload , text=__UpperCamelCase , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : Any , __UpperCamelCase : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase = """"""
for key, value in failures.items():
_UpperCAmelCase = value[:2_00] + """ [Truncated]""" if len(__UpperCamelCase ) > 2_50 else value
failures_text += f"""*{key}*\n_{value}_\n\n"""
_UpperCAmelCase = job_name
_UpperCAmelCase = {"""type""": """section""", """text""": {"""type""": """mrkdwn""", """text""": text}}
if job_link is not None:
_UpperCAmelCase = {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """GitHub Action job""", """emoji""": True},
"""url""": job_link,
}
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
def _snake_case ( self : int ) ->Optional[Any]:
'''simple docstring'''
if self.thread_ts is None:
raise ValueError("""Can only post reply if a post has been made.""" )
_UpperCAmelCase = self.doc_test_results.pop("""job_link""" )
self.doc_test_results.pop("""failures""" )
self.doc_test_results.pop("""success""" )
self.doc_test_results.pop("""time_spent""" )
_UpperCAmelCase = sorted(self.doc_test_results.items() , key=lambda t : t[0] )
for job, job_result in sorted_dict:
if len(job_result["""failures"""] ):
_UpperCAmelCase = f"""*Num failures* :{len(job_result["failed"] )} \n"""
_UpperCAmelCase = job_result["""failures"""]
_UpperCAmelCase = self.get_reply_blocks(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase , text=__UpperCamelCase )
print("""Sending the following reply""" )
print(json.dumps({"""blocks""": blocks} ) )
client.chat_postMessage(
channel=os.environ["""CI_SLACK_CHANNEL_ID_DAILY"""] , text=f"""Results for {job}""" , blocks=__UpperCamelCase , thread_ts=self.thread_ts["""ts"""] , )
time.sleep(1 )
def _UpperCamelCase ( ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = os.environ["""GITHUB_RUN_ID"""]
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A ).json()
_UpperCAmelCase = {}
try:
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" ).json()
jobs.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return jobs
except Exception as e:
print("""Unknown error, could not fetch links.""" , _A )
return {}
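# Editor's note on the pagination above: the GitHub jobs API returns at most
# 100 entries per page, so after the first request the loop fetches
# math.ceil((total_count - 100) / 100) additional pages. For example,
# total_count = 250 gives ceil(150 / 100) = 2 extra requests, issued with
# "&page=2" and "&page=3".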
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = {}
if os.path.exists(_A ):
_UpperCAmelCase = os.listdir(_A )
for file in files:
try:
with open(os.path.join(_A , _A ) , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.read()
except UnicodeDecodeError as e:
raise ValueError(F"""Could not open {os.path.join(_A , _A )}.""" ) from e
return _artifact
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
class a_ :
def __init__( self : List[Any] , __UpperCamelCase : str ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = name
_UpperCAmelCase = []
def __str__( self : int ) ->Optional[Any]:
'''simple docstring'''
return self.name
def _snake_case ( self : Dict , __UpperCamelCase : str ) ->int:
'''simple docstring'''
self.paths.append({"""name""": self.name, """path""": path} )
_UpperCAmelCase = {}
_UpperCAmelCase = filter(os.path.isdir , os.listdir() )
for directory in directories:
_UpperCAmelCase = directory
if artifact_name not in _available_artifacts:
_UpperCAmelCase = Artifact(_A )
_available_artifacts[artifact_name].add_path(_A )
return _available_artifacts
if __name__ == "__main__":
a : Dict = get_job_links()
a : Dict = retrieve_available_artifacts()
a : Optional[int] = collections.OrderedDict(
[
('''*.py''', '''API Examples'''),
('''*.md''', '''MD Examples'''),
]
)
# This dict will contain all the information relative to each doc test category:
# - failed: list of failed tests
# - failures: dict in the format 'test': 'error_message'
a : Dict = {
v: {
'''failed''': [],
'''failures''': {},
}
for v in docs.values()
}
# Link to the GitHub Action job
a : int = github_actions_job_links.get('''run_doctests''')
a : Tuple = available_artifacts['''doc_tests_gpu_test_reports'''].paths[0]
a : Optional[Any] = retrieve_artifact(artifact_path['''name'''])
if "stats" in artifact:
a , a , a : str = handle_test_results(artifact['''stats'''])
a : Tuple = failed
a : int = success
a : Any = time_spent[1:-1] + ''', '''
a : Dict = extract_first_line_failure(artifact['''failures_short'''])
for line in artifact["summary_short"].split('''\n'''):
if re.search('''FAILED''', line):
a : List[Any] = line.replace('''FAILED ''', '''''')
a : Tuple = line.split()[0].replace('''\n''', '''''')
if "::" in line:
a , a : Union[str, Any] = line.split('''::''')
else:
a , a : Optional[Any] = line, line
for file_regex in docs.keys():
if fnmatch(file_path, file_regex):
a : List[Any] = docs[file_regex]
doc_test_results[category]["failed"].append(test)
a : Optional[Any] = all_failures[test] if test in all_failures else '''N/A'''
a : List[str] = failure
break
a : List[Any] = Message('''🤗 Results of the doc tests.''', doc_test_results)
message.post()
message.post_reply() | 19 | 0 |
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
return 1 if digit in (0, 1) else (digit * factorial(digit - 1 ))
def _UpperCamelCase ( _A ) -> bool:
"""simple docstring"""
_UpperCAmelCase = 0
_UpperCAmelCase = number
while duplicate > 0:
_UpperCAmelCase ,_UpperCAmelCase = divmod(duplicate , 1_0 )
fact_sum += factorial(digit )
return fact_sum == number
if __name__ == "__main__":
print('''Program to check whether a number is a Krisnamurthy Number or not.''')
a : Any = int(input('''Enter number: ''').strip())
print(
F"{number} is {'' if krishnamurthy(number) else 'not '}a Krishnamurthy Number."
) | 718 |
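# Editor's sketch (hedged): an equivalent one-liner for the digit-factorial
# check above, useful as a cross-check of the fixed loop; the function name
# is illustrative.
from math import factorial as fact

def is_krishnamurthy(n: int) -> bool:
    """Return True iff n equals the sum of the factorials of its digits.

    >>> [x for x in range(1, 150) if is_krishnamurthy(x)]
    [1, 2, 145]
    """
    return n == sum(fact(int(d)) for d in str(n))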
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def _UpperCamelCase ( _A , _A=False ) -> str:
"""simple docstring"""
try:
_UpperCAmelCase = os.environ[key]
except KeyError:
# KEY isn't set, default to `default`.
_UpperCAmelCase = default
else:
# KEY is set, convert it to True or False.
try:
_UpperCAmelCase = strtobool(_A )
except ValueError:
# More values are supported, but let's keep the message simple.
raise ValueError(F"""If set, {key} must be yes or no.""" )
return _value
a : Union[str, Any] = parse_flag_from_env('''RUN_SLOW''', default=False)
a : Tuple = parse_flag_from_env('''RUN_REMOTE''', default=False)
a : Union[str, Any] = parse_flag_from_env('''RUN_LOCAL''', default=True)
a : int = parse_flag_from_env('''RUN_PACKAGED''', default=True)
# Compression
a : List[Any] = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason='''test requires lz4''')
a : List[Any] = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason='''test requires py7zr''')
a : Optional[Any] = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason='''test requires zstandard''')
# Audio
a : int = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
a : Tuple = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
a : Any = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
a : int = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import faiss # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires faiss""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
try:
import regex # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires regex""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
try:
import elasticsearch # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires elasticsearch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
try:
import sqlalchemy # noqa
except ImportError:
_UpperCAmelCase = unittest.skip("""test requires sqlalchemy""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
if not config.TORCH_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires PyTorch""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
if not config.TF_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires TensorFlow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
if not config.JAX_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires JAX""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not config.PIL_AVAILABLE:
_UpperCAmelCase = unittest.skip("""test requires Pillow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> str:
"""simple docstring"""
try:
import transformers # noqa F401
except ImportError:
return unittest.skip("""test requires transformers""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import tiktoken # noqa F401
except ImportError:
return unittest.skip("""test requires tiktoken""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import spacy # noqa F401
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> int:
"""simple docstring"""
def _require_spacy_model(_A ):
try:
import spacy # noqa F401
spacy.load(_A )
except ImportError:
return unittest.skip("""test requires spacy""" )(_A )
except OSError:
return unittest.skip("""test requires spacy model '{}'""".format(_A ) )(_A )
else:
return test_case
return _require_spacy_model
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
try:
import pyspark # noqa F401
except ImportError:
return unittest.skip("""test requires pyspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
try:
import joblibspark # noqa F401
except ImportError:
return unittest.skip("""test requires joblibspark""" )(_A )
else:
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_slow_tests or _run_slow_tests == 0:
_UpperCAmelCase = unittest.skip("""test is slow""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Any:
"""simple docstring"""
if not _run_local_tests or _run_local_tests == 0:
_UpperCAmelCase = unittest.skip("""test is local""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
if not _run_packaged_tests or _run_packaged_tests == 0:
_UpperCAmelCase = unittest.skip("""test is packaged""" )(_A )
return test_case
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
if not _run_remote_tests or _run_remote_tests == 0:
_UpperCAmelCase = unittest.skip("""test requires remote""" )(_A )
return test_case
def _UpperCamelCase ( *_A ) -> Dict:
"""simple docstring"""
def decorate(cls ):
for name, fn in cls.__dict__.items():
if callable(_A ) and name.startswith("""test""" ):
for decorator in decorators:
_UpperCAmelCase = decorator(_A )
setattr(cls , _A , _A )
return cls
return decorate
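# Editor's sketch (hedged): typical use of the decorator factory above,
# applying several markers to every `test*` method of a TestCase in one
# shot. `for_all_test_methods` is a readable stand-in for the obfuscated
# helper, and the tagging decorator is purely illustrative.
import unittest

def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in list(cls.__dict__.items()):
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls
    return decorate

def tag(label):
    def deco(fn):
        fn._tags = getattr(fn, "_tags", []) + [label]
        return fn
    return deco

@for_all_test_methods(tag("slow"), tag("gpu"))
class ExampleTests(unittest.TestCase):
    def test_something(self):
        self.assertTrue(True)

# ExampleTests.test_something._tags == ["slow", "gpu"]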
class a_ ( _UpperCAmelCase ):
pass
class a_ ( _UpperCAmelCase ):
a : Any = 0
a : Optional[Any] = 1
a : int = 2
@contextmanager
def _UpperCamelCase ( _A=OfflineSimulationMode.CONNECTION_FAILS , _A=1e-16 ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = requests.Session().request
def timeout_request(_A , _A , _A , **_A ):
# Change the url to an invalid url so that the connection hangs
_UpperCAmelCase = """https://10.255.255.1"""
if kwargs.get("""timeout""" ) is None:
raise RequestWouldHangIndefinitelyError(
F"""Tried a call to {url} in offline mode with no timeout set. Please set a timeout.""" )
_UpperCAmelCase = timeout
try:
return online_request(_A , _A , **_A )
except Exception as e:
# The following changes in the error are just here to make the offline timeout error prettier
_UpperCAmelCase = url
_UpperCAmelCase = e.args[0]
_UpperCAmelCase = (max_retry_error.args[0].replace("""10.255.255.1""" , F"""OfflineMock[{url}]""" ),)
_UpperCAmelCase = (max_retry_error,)
raise
def raise_connection_error(_A , _A , **_A ):
raise requests.ConnectionError("""Offline mode is enabled.""" , request=_A )
if mode is OfflineSimulationMode.CONNECTION_FAILS:
with patch("""requests.Session.send""" , _A ):
yield
elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
# inspired from https://stackoverflow.com/a/904609
with patch("""requests.Session.request""" , _A ):
yield
elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
with patch("""datasets.config.HF_DATASETS_OFFLINE""" , _A ):
yield
else:
raise ValueError("""Please use a value from the OfflineSimulationMode enum.""" )
@contextmanager
def _UpperCamelCase ( *_A , **_A ) -> str:
"""simple docstring"""
_UpperCAmelCase = str(Path().resolve() )
with tempfile.TemporaryDirectory(*_A , **_A ) as tmp_dir:
try:
os.chdir(tmp_dir )
yield
finally:
os.chdir(original_working_dir )
@contextmanager
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."
@contextmanager
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
import gc
gc.collect()
_UpperCAmelCase = pa.total_allocated_bytes()
yield
assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
return deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist() == deepcopy(_A ).integers(0 , 1_0_0 , 1_0 ).tolist()
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
import decorator
from requests.exceptions import HTTPError
def _wrapper(_A , *_A , **_A ):
try:
return func(*_A , **_A )
except HTTPError as err:
if str(_A ).startswith("""500""" ) or str(_A ).startswith("""502""" ):
pytest.xfail(str(_A ) )
raise err
return decorator.decorator(_wrapper , _A )
class a_ :
def __init__( self : int , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = returncode
_UpperCAmelCase = stdout
_UpperCAmelCase = stderr
async def _UpperCamelCase ( _A , _A ) -> Union[str, Any]:
"""simple docstring"""
while True:
_UpperCAmelCase = await stream.readline()
if line:
callback(_A )
else:
break
async def _UpperCamelCase ( _A , _A=None , _A=None , _A=None , _A=False , _A=False ) -> _RunOutput:
"""simple docstring"""
if echo:
print("""\nRunning: """ , """ """.join(_A ) )
_UpperCAmelCase = await asyncio.create_subprocess_exec(
cmd[0] , *cmd[1:] , stdin=_A , stdout=asyncio.subprocess.PIPE , stderr=asyncio.subprocess.PIPE , env=_A , )
# note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
# https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
#
# If it starts hanging, will need to switch to the following code. The problem is that no data
# will be seen until it's done and if it hangs for example there will be no debug info.
# out, err = await p.communicate()
# return _RunOutput(p.returncode, out, err)
_UpperCAmelCase = []
_UpperCAmelCase = []
def tee(_A , _A , _A , _A="" ):
_UpperCAmelCase = line.decode("""utf-8""" ).rstrip()
sink.append(line )
if not quiet:
print(label , line , file=pipe )
# XXX: the timeout doesn't seem to make any difference here
await asyncio.wait(
[
_read_stream(p.stdout , lambda _A : tee(_A , _A , sys.stdout , label="""stdout:""" ) ),
_read_stream(p.stderr , lambda _A : tee(_A , _A , sys.stderr , label="""stderr:""" ) ),
] , timeout=_A , )
return _RunOutput(await p.wait() , _A , _A )
def _UpperCamelCase ( _A , _A=None , _A=None , _A=1_8_0 , _A=False , _A=True ) -> _RunOutput:
"""simple docstring"""
_UpperCAmelCase = asyncio.get_event_loop()
_UpperCAmelCase = loop.run_until_complete(
_stream_subprocess(_A , env=_A , stdin=_A , timeout=_A , quiet=_A , echo=_A ) )
_UpperCAmelCase = """ """.join(_A )
if result.returncode > 0:
_UpperCAmelCase = """\n""".join(result.stderr )
raise RuntimeError(
F"""'{cmd_str}' failed with returncode {result.returncode}\n\n"""
F"""The combined stderr from workers follows:\n{stderr}""" )
# check that the subprocess actually did run and produced some output, should the test rely on
# the remote side to do the testing
if not result.stdout and not result.stderr:
raise RuntimeError(F"""'{cmd_str}' produced no output.""" )
return result
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = os.environ.get("""PYTEST_XDIST_WORKER""" , """gw0""" )
_UpperCAmelCase = re.sub(R"""^gw""" , """""" , worker , 0 , re.M )
return int(worker )
def _UpperCamelCase ( ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = 2_9_5_0_0
_UpperCAmelCase = pytest_xdist_worker_id()
return port + uniq_delta | 19 | 0 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(__a ):
_UpperCAmelCase = y[k] + step_size * ode_func(__a , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(__a , y[k] ) + ode_func(x + step_size , __a ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 719 |
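# Editor's note: the integrator in the row above is Heun's method (the
# explicit trapezoidal rule): an Euler predictor step followed by a
# trapezoidal corrector, giving second-order global accuracy. A minimal,
# self-contained sketch with a worked check (names are illustrative):
import numpy as np

def heun(ode_func, y0, x0, x_end, step_size):
    n = int(np.ceil((x_end - x0) / step_size))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        predictor = y[k] + step_size * ode_func(x, y[k])  # Euler step
        y[k + 1] = y[k] + (step_size / 2) * (
            ode_func(x, y[k]) + ode_func(x + step_size, predictor)
        )
        x += step_size
    return y

# For y' = y with y(0) = 1 on [0, 1] and step 0.1, heun(...)[-1] is about
# 2.7141 against the exact e = 2.71828..., the expected O(h^2) global error.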
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class a_ ( _UpperCAmelCase ):
a : List[Any] = ''
a : Union[str, Any] = 'hf-legacy' # "hf://"" is reserved for hffs
def __init__( self : Tuple , __UpperCamelCase : Optional[DatasetInfo] = None , __UpperCamelCase : Optional[str] = None , **__UpperCamelCase : Any , ) ->Any:
'''simple docstring'''
super().__init__(self , **__UpperCamelCase )
_UpperCAmelCase = repo_info
_UpperCAmelCase = token
_UpperCAmelCase = None
def _snake_case ( self : List[str] ) ->List[str]:
'''simple docstring'''
if self.dir_cache is None:
_UpperCAmelCase = {}
for hf_file in self.repo_info.siblings:
# TODO(QL): add sizes
_UpperCAmelCase = {
"""name""": hf_file.rfilename,
"""size""": None,
"""type""": """file""",
}
self.dir_cache.update(
{
str(__UpperCamelCase ): {"""name""": str(__UpperCamelCase ), """size""": None, """type""": """directory"""}
for d in list(PurePosixPath(hf_file.rfilename ).parents )[:-1]
} )
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : str = "rb" , **__UpperCamelCase : Any , ) ->List[str]:
'''simple docstring'''
if not isinstance(self.repo_info , __UpperCamelCase ):
raise NotImplementedError(f"""Open is only implemented for dataset repositories, but got {self.repo_info}""" )
_UpperCAmelCase = hf_hub_url(self.repo_info.id , __UpperCamelCase , revision=self.repo_info.sha )
return fsspec.open(
__UpperCamelCase , mode=__UpperCamelCase , headers=get_authentication_headers_for_url(__UpperCamelCase , use_auth_token=self.token ) , client_kwargs={"""trust_env""": True} , ).open()
def _snake_case ( self : int , __UpperCamelCase : int , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = self._strip_protocol(__UpperCamelCase )
if path in self.dir_cache:
return self.dir_cache[path]
else:
raise FileNotFoundError(__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Tuple=False , **__UpperCamelCase : List[str] ) ->Optional[Any]:
'''simple docstring'''
self._get_dirs()
_UpperCAmelCase = PurePosixPath(path.strip("""/""" ) )
_UpperCAmelCase = {}
for p, f in self.dir_cache.items():
_UpperCAmelCase = PurePosixPath(p.strip("""/""" ) )
_UpperCAmelCase = p.parent
if root == path:
_UpperCAmelCase = f
_UpperCAmelCase = list(paths.values() )
if detail:
return out
else:
return sorted(f["""name"""] for f in out ) | 19 | 0 |
from ...configuration_utils import PretrainedConfig
from ...utils import logging
a : str = logging.get_logger(__name__)
a : Optional[int] = {
"""MIT/ast-finetuned-audioset-10-10-0.4593""": (
"""https://huggingface.co/MIT/ast-finetuned-audioset-10-10-0.4593/resolve/main/config.json"""
),
}
class a_ ( _snake_case ):
a : int = """audio-spectrogram-transformer"""
def __init__( self : List[str] , __UpperCamelCase : List[Any]=7_68 , __UpperCamelCase : Any=12 , __UpperCamelCase : Optional[int]=12 , __UpperCamelCase : Optional[Any]=30_72 , __UpperCamelCase : Any="gelu" , __UpperCamelCase : str=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : List[Any]=0.0_2 , __UpperCamelCase : Dict=1e-12 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[int]=10 , __UpperCamelCase : Any=10 , __UpperCamelCase : List[str]=10_24 , __UpperCamelCase : Union[str, Any]=1_28 , **__UpperCamelCase : Optional[Any] , ) ->str:
'''simple docstring'''
super().__init__(**snake_case_ )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = patch_size
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = frequency_stride
_UpperCAmelCase = time_stride
_UpperCAmelCase = max_length
_UpperCAmelCase = num_mel_bins | 720 |
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Any = '''
Calculates how good the predictions are, given some references, using certain scores
Args:
predictions: list of candidates to evaluate. Each candidates should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
num_workers: number of workers used to evaluate the candidate programs (Default: 4).
timeout: maximum time, in seconds, that each candidate program is allowed to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : List[Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=[1, 10, 1_00] , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple=3.0 ) ->Union[str, Any]:
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase ,_UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
def estimator(_A , _A , _A ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_A , _A ):
_UpperCAmelCase = itertools.repeat(_A , len(_A ) )
else:
assert len(_A ) == len(_A )
_UpperCAmelCase = iter(_A )
return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] ) | 19 | 0 |
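# Editor's note: `estimator` above is the unbiased pass@k from the HumanEval
# paper: pass@k = 1 - C(n-c, k) / C(n, k), evaluated stably via the product
# form 1 - prod_{i = n-c+1 .. n} (1 - k/i). A hedged standalone sketch:
import numpy as np

def pass_at_k(n: int, c: int, k: int) -> float:
    if n - c < k:   # every size-k draw must then contain a passing sample
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))

# Worked check with n = 5 samples, c = 2 passing, budget k = 2:
# 1 - C(3,2)/C(5,2) = 1 - 3/10 = 0.7, and pass_at_k(5, 2, 2) gives 0.7
# (up to float rounding).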
from ....configuration_utils import PretrainedConfig
from ....utils import logging
a : Dict = logging.get_logger(__name__)
a : str = {
'''speechbrain/m-ctc-t-large''': '''https://huggingface.co/speechbrain/m-ctc-t-large/resolve/main/config.json''',
# See all M-CTC-T models at https://huggingface.co/models?filter=mctct
}
class a_ ( lowercase__ ):
a : int = 'mctct'
def __init__( self : Any , __UpperCamelCase : str=80_65 , __UpperCamelCase : str=15_36 , __UpperCamelCase : int=36 , __UpperCamelCase : Union[str, Any]=61_44 , __UpperCamelCase : str=4 , __UpperCamelCase : str=3_84 , __UpperCamelCase : List[str]=9_20 , __UpperCamelCase : Dict=1e-5 , __UpperCamelCase : Any=0.3 , __UpperCamelCase : int="relu" , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : Optional[Any]=0.3 , __UpperCamelCase : Dict=0.3 , __UpperCamelCase : Any=1 , __UpperCamelCase : Any=0 , __UpperCamelCase : List[str]=2 , __UpperCamelCase : List[Any]=1 , __UpperCamelCase : Optional[Any]=0.3 , __UpperCamelCase : int=1 , __UpperCamelCase : Dict=(7,) , __UpperCamelCase : Any=(3,) , __UpperCamelCase : Union[str, Any]=80 , __UpperCamelCase : Optional[Any]=1 , __UpperCamelCase : Dict=None , __UpperCamelCase : List[str]="sum" , __UpperCamelCase : Optional[Any]=False , **__UpperCamelCase : Dict , ) ->Optional[int]:
'''simple docstring'''
super().__init__(**UpperCAmelCase__ , pad_token_id=UpperCAmelCase__ , bos_token_id=UpperCAmelCase__ , eos_token_id=UpperCAmelCase__ )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = attention_head_dim
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = layerdrop
_UpperCAmelCase = hidden_act
_UpperCAmelCase = initializer_range
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = bos_token_id
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = conv_glu_dim
_UpperCAmelCase = conv_dropout
_UpperCAmelCase = num_conv_layers
_UpperCAmelCase = input_feat_per_channel
_UpperCAmelCase = input_channels
_UpperCAmelCase = conv_channels
_UpperCAmelCase = ctc_loss_reduction
_UpperCAmelCase = ctc_zero_infinity
# prevents config testing fail with exporting to json
_UpperCAmelCase = list(UpperCAmelCase__ )
_UpperCAmelCase = list(UpperCAmelCase__ )
if len(self.conv_kernel ) != self.num_conv_layers:
raise ValueError(
"""Configuration for convolutional module is incorrect. """
"""It is required that `len(config.conv_kernel)` == `config.num_conv_layers` """
f"""but is `len(config.conv_kernel) = {len(self.conv_kernel )}`, """
f"""`config.num_conv_layers = {self.num_conv_layers}`.""" ) | 721 |
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 19 | 0 |
"""simple docstring"""
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = []
for data in source_data:
for i, el in enumerate(data ):
if len(data_lists ) < i + 1:
data_lists.append([] )
data_lists[i].append(float(el ) )
return data_lists
def _UpperCamelCase ( _A , _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = []
for dlist, weight in zip(_lowerCAmelCase , _lowerCAmelCase ):
_UpperCAmelCase = min(_lowerCAmelCase )
_UpperCAmelCase = max(_lowerCAmelCase )
_UpperCAmelCase = []
# for weight 0 score is 1 - actual score
if weight == 0:
for item in dlist:
try:
score.append(1 - ((item - mind) / (maxd - mind)) )
except ZeroDivisionError:
score.append(1 )
elif weight == 1:
for item in dlist:
try:
score.append((item - mind) / (maxd - mind) )
except ZeroDivisionError:
score.append(0 )
# weight not 0 or 1
else:
_UpperCAmelCase = F"""Invalid weight of {weight:f} provided"""
raise ValueError(_lowerCAmelCase )
score_lists.append(_lowerCAmelCase )
return score_lists
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = [0 for i in range(len(score_lists[0] ) )]
for slist in score_lists:
for j, ele in enumerate(_lowerCAmelCase ):
_UpperCAmelCase = final_scores[j] + ele
return final_scores
def _UpperCamelCase ( _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = get_data(_lowerCAmelCase )
_UpperCAmelCase = calculate_each_score(_lowerCAmelCase , _lowerCAmelCase )
_UpperCAmelCase = generate_final_scores(_lowerCAmelCase )
# append scores to source data
for i, ele in enumerate(_lowerCAmelCase ):
source_data[i].append(_lowerCAmelCase )
return source_data | 700 |
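# Editor's sketch (hedged): a compact, self-contained rendering of the same
# min-max scoring idea above. Weight 1 means "higher is better", weight 0
# means "lower is better", and per-column scores are summed per row; the
# zero-span handling differs slightly from the try/except above but yields
# the same values.
def weighted_scores(rows, weights):
    cols = list(zip(*[[float(v) for v in row] for row in rows]))
    norm = []
    for col, w in zip(cols, weights):
        lo, hi = min(col), max(col)
        span = (hi - lo) or 1.0            # constant column: avoid /0
        norm.append([(x - lo) / span if w else 1 - (x - lo) / span for x in col])
    return [sum(vals) for vals in zip(*norm)]

# weighted_scores([["20", "2"], ["25", "1"], ["30", "3"]], [1, 0])
# -> [0.5, 1.5, 1.0]; the second alternative scores best.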
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
main() | 19 | 0 |
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
a : Optional[Any] = logging.get_logger(__name__)
a : Dict = {
'vocab_file': 'vocab.json',
'tokenizer_config_file': 'tokenizer_config.json',
'merges_file': 'merges.txt',
}
a : int = {
'vocab_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json'
),
},
'tokenizer_config_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json'
),
},
'merges_file': {
'facebook/s2t-wav2vec2-large-en-de': (
'https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt'
),
},
}
a : Optional[Any] = '</w>'
a : Optional[Any] = '@@ '
def _UpperCamelCase ( _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = set()
_UpperCAmelCase = word[0]
for char in word[1:]:
pairs.add((prev_char, char) )
_UpperCAmelCase = char
return pairs
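# Editor's note, a worked example of get_pairs above:
#   get_pairs(("l", "o", "w", "er</w>"))
#   -> {("l", "o"), ("o", "w"), ("w", "er</w>")}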
# Speech2Text2 has no max input length
a : Dict = {'facebook/s2t-wav2vec2-large-en-de': 1_0_2_4}
class a_ ( lowerCamelCase__ ):
a : Tuple = VOCAB_FILES_NAMES
a : List[Any] = PRETRAINED_VOCAB_FILES_MAP
a : List[Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Dict = ['input_ids', 'attention_mask']
def __init__( self : str , __UpperCamelCase : Any , __UpperCamelCase : Union[str, Any]="<s>" , __UpperCamelCase : Any="<pad>" , __UpperCamelCase : str="</s>" , __UpperCamelCase : str="<unk>" , __UpperCamelCase : Union[str, Any]=False , __UpperCamelCase : str=None , **__UpperCamelCase : Any , ) ->str:
'''simple docstring'''
super().__init__(
unk_token=__lowerCamelCase , bos_token=__lowerCamelCase , eos_token=__lowerCamelCase , pad_token=__lowerCamelCase , do_lower_case=__lowerCamelCase , **__lowerCamelCase , )
_UpperCAmelCase = do_lower_case
with open(__lowerCamelCase , encoding="""utf-8""" ) as vocab_handle:
_UpperCAmelCase = json.load(__lowerCamelCase )
_UpperCAmelCase = {v: k for k, v in self.encoder.items()}
if merges_file is None:
logger.info(f"""No merges files provided. {self.__class__.__name__} can only be used for decoding.""" )
_UpperCAmelCase = None
_UpperCAmelCase = None
else:
with open(__lowerCamelCase , encoding="""utf-8""" ) as merges_handle:
_UpperCAmelCase = merges_handle.read().split("""\n""" )[:-1]
_UpperCAmelCase = [tuple(merge.split()[:2] ) for merge in merges]
_UpperCAmelCase = dict(zip(__lowerCamelCase , range(len(__lowerCamelCase ) ) ) )
_UpperCAmelCase = {}
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
return len(self.decoder )
def _snake_case ( self : Any ) ->Dict:
'''simple docstring'''
return dict(self.encoder , **self.added_tokens_encoder )
def _snake_case ( self : Optional[int] , __UpperCamelCase : str ) ->Any:
'''simple docstring'''
_UpperCAmelCase = tuple(token[:-1] ) + (token[-1] + BPE_TOKEN_MERGES,)
if token in self.cache:
return self.cache[token]
_UpperCAmelCase = get_pairs(__lowerCamelCase )
if not pairs:
return token
while True:
_UpperCAmelCase = min(pairs , key=lambda pair : self.bpe_ranks.get(pair , float("""inf""" ) ) )
if bigram not in self.bpe_ranks:
break
_UpperCAmelCase = bigram
_UpperCAmelCase = []
_UpperCAmelCase = 0
while i < len(__lowerCamelCase ):
try:
_UpperCAmelCase = word.index(__lowerCamelCase , __lowerCamelCase )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
_UpperCAmelCase = j
if word[i] == first and i < len(__lowerCamelCase ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
_UpperCAmelCase = tuple(__lowerCamelCase )
_UpperCAmelCase = new_word
if len(__lowerCamelCase ) == 1:
break
else:
_UpperCAmelCase = get_pairs(__lowerCamelCase )
_UpperCAmelCase = ''' '''.join(__lowerCamelCase )
if word == "\n " + BPE_TOKEN_MERGES:
_UpperCAmelCase = '''\n''' + BPE_TOKEN_MERGES
if word.endswith(__lowerCamelCase ):
_UpperCAmelCase = word.replace(__lowerCamelCase , """""" )
_UpperCAmelCase = word.replace(""" """ , __lowerCamelCase )
_UpperCAmelCase = word
return word
def _snake_case ( self : str , __UpperCamelCase : Any ) ->int:
'''simple docstring'''
if self.bpe_ranks is None:
raise ValueError(
"""This tokenizer was instantiated without a `merges.txt` file, so"""
""" that it can only be used for decoding, not for encoding."""
"""Make sure to provide `merges.txt` file at instantiation to enable """
"""encoding.""" )
if self.do_lower_case:
_UpperCAmelCase = text.lower()
_UpperCAmelCase = text.split()
_UpperCAmelCase = []
for token in text:
if token:
split_tokens.extend(list(self.bpe(__lowerCamelCase ).split(""" """ ) ) )
return split_tokens
def _snake_case ( self : List[str] , __UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.encoder.get(__lowerCamelCase , self.encoder.get(self.unk_token ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : Optional[Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.decoder.get(__lowerCamelCase , self.unk_token )
return result
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Dict ) ->str:
'''simple docstring'''
_UpperCAmelCase = ''' '''.join(__lowerCamelCase )
# make sure @@ tokens are concatenated
_UpperCAmelCase = ''''''.join(string.split(__lowerCamelCase ) )
return string
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : List[str] = None ) ->Tuple[str]:
'''simple docstring'''
if not os.path.isdir(__lowerCamelCase ):
logger.error(f"""Vocabulary path ({save_directory}) should be a directory""" )
return
_UpperCAmelCase = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase = os.path.join(
__lowerCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""merges_file"""] )
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=__lowerCamelCase , ensure_ascii=__lowerCamelCase ) + """\n""" )
_UpperCAmelCase = 0
if self.bpe_ranks is None:
return (vocab_file,)
with open(__lowerCamelCase , """w""" , encoding="""utf-8""" ) as writer:
for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
f"""Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."""
""" Please check that the tokenizer is not corrupted!""" )
_UpperCAmelCase = token_index
writer.write(""" """.join(__lowerCamelCase ) + """\n""" )
index += 1
return (vocab_file, merges_file) | 701 |
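# Editor's sketch (hedged): the heart of the `bpe` method above is the
# classic greedy merge loop. This toy version merges one lowest-ranked pair
# at a time, which gives the same result for well-formed merge tables (a
# merge that uses a fused symbol is always ranked after the merge that
# created it). Names are illustrative.
def bpe_merge(symbols, ranks):
    symbols = list(symbols)
    while True:
        scored = [(ranks.get(pair, float("inf")), i, pair)
                  for i, pair in enumerate(zip(symbols, symbols[1:]))]
        if not scored:
            return symbols
        rank, i, (first, second) = min(scored)
        if rank == float("inf"):           # no mergeable pair left
            return symbols
        symbols[i:i + 2] = [first + second]

# bpe_merge(("t", "e", "s", "t</w>"), {("e", "s"): 0, ("es", "t</w>"): 1})
# -> ['t', 'est</w>'], which the method above then renders as "t@@ est"
# via the "</w>" end marker and the "@@ " continuation marker.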
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = requests.get(_A , headers=_A , allow_redirects=_A )
_UpperCAmelCase = result.headers["""Location"""]
_UpperCAmelCase = requests.get(_A , allow_redirects=_A )
_UpperCAmelCase = os.path.join(_A , F"""{artifact_name}.zip""" )
with open(_A , """wb""" ) as fp:
fp.write(response.content )
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
_UpperCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(""": """ )]
_UpperCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase = line[len("""FAILED """ ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
_UpperCAmelCase = line
if len(_A ) != len(_A ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` """
F"""and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
_UpperCAmelCase = None
if job_name and job_links:
_UpperCAmelCase = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_UpperCAmelCase = test.split("""/""" )[2]
else:
_UpperCAmelCase = None
return test
def _UpperCamelCase ( _A , _A=None ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase = [x for x in logs if x[2] is not None]
_UpperCAmelCase = {x[2] for x in logs}
_UpperCAmelCase = {}
for test in tests:
_UpperCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase = {"""count""": n_errors, """errors""": error_counts}
_UpperCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=True ) )
return r
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """| no. | error | status |"""
_UpperCAmelCase = """|-:|:-|:-|"""
_UpperCAmelCase = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase = reduced_by_error[error]["""count"""]
_UpperCAmelCase = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(_A )
return "\n".join(_A )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """| model | no. of errors | major error | count |"""
_UpperCAmelCase = """|-:|-:|-:|-:|"""
_UpperCAmelCase = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase = reduced_by_model[model]["""count"""]
_UpperCAmelCase ,_UpperCAmelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
_UpperCAmelCase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(_A )
return "\n".join(_A )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
a : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a : Tuple = get_job_links(args.workflow_run_id, token=args.token)
a : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a : List[Any] = k.find(''' / ''')
a : Tuple = k[index + len(''' / ''') :]
a : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a : Optional[int] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a : int = reduce_by_error(errors)
a : str = reduce_by_model(errors)
a : int = make_github_table(reduced_by_error)
a : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa) | 19 | 0 |
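For illustration, the aggregation above can be exercised on a couple of toy log entries; the `(job_link, error, test)` triples below are invented for the example, not real CI output:

# Hypothetical sanity check for reduce_by_error / reduce_by_model (toy data).
sample_logs = [
    ("link1", "ImportError: no module named foo", "tests/models/bert/test_modeling_bert.py::test_a"),
    ("link2", "ImportError: no module named foo", "tests/models/gpt2/test_modeling_gpt2.py::test_b"),
    ("link3", "AssertionError", "tests/models/bert/test_modeling_bert.py::test_c"),
]
print(reduce_by_error(sample_logs))  # errors sorted by count, each with its failed tests
print(reduce_by_model(sample_logs))  # {"bert": {"count": 2, ...}, "gpt2": {"count": 1, ...}}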
"""simple docstring"""
import json
import os
from typing import Dict, List, Optional, Tuple
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    "vocab_file": "vocab.json",
    "tokenizer_config_file": "tokenizer_config.json",
    "merges_file": "merges.txt",
}

PRETRAINED_VOCAB_FILES_MAP = {
    "vocab_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/vocab.json"
        ),
    },
    "tokenizer_config_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/tokenizer_config.json"
        ),
    },
    "merges_file": {
        "facebook/s2t-wav2vec2-large-en-de": (
            "https://huggingface.co/facebook/s2t-wav2vec2-large-en-de/resolve/main/merges.txt"
        ),
    },
}

BPE_TOKEN_MERGES = "</w>"
BPE_TOKEN_VOCAB = "@@ "


def get_pairs(word):
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char
    return pairs


# Speech2Text2 has no max input length
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {"facebook/s2t-wav2vec2-large-en-de": 1024}


class Speech2Text2Tokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    model_input_names = ["input_ids", "attention_mask"]

    def __init__(
        self,
        vocab_file,
        bos_token="<s>",
        pad_token="<pad>",
        eos_token="</s>",
        unk_token="<unk>",
        do_lower_case=False,
        merges_file=None,
        **kwargs,
    ):
        '''simple docstring'''
        super().__init__(
            unk_token=unk_token,
            bos_token=bos_token,
            eos_token=eos_token,
            pad_token=pad_token,
            do_lower_case=do_lower_case,
            **kwargs,
        )

        self.do_lower_case = do_lower_case

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}

        if merges_file is None:
            logger.info(f"No merges files provided. {self.__class__.__name__} can only be used for decoding.")
            self.bpe_ranks = None
            self.cache = None
        else:
            with open(merges_file, encoding="utf-8") as merges_handle:
                merges = merges_handle.read().split("\n")[:-1]

            merges = [tuple(merge.split()[:2]) for merge in merges]
            self.bpe_ranks = dict(zip(merges, range(len(merges))))
            self.cache = {}

    @property
    def vocab_size(self) -> int:
        '''simple docstring'''
        return len(self.decoder)

    def get_vocab(self) -> Dict:
        '''simple docstring'''
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        '''simple docstring'''
        word = tuple(token[:-1]) + (token[-1] + BPE_TOKEN_MERGES,)
        if token in self.cache:
            return self.cache[token]
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = " ".join(word)
        if word == "\n  " + BPE_TOKEN_MERGES:
            word = "\n" + BPE_TOKEN_MERGES

        if word.endswith(BPE_TOKEN_MERGES):
            word = word.replace(BPE_TOKEN_MERGES, "")

        word = word.replace(" ", BPE_TOKEN_VOCAB)
        self.cache[token] = word
        return word

    def _tokenize(self, text):
        '''simple docstring'''
        if self.bpe_ranks is None:
            raise ValueError(
                "This tokenizer was instantiated without a `merges.txt` file, so"
                " that it can only be used for decoding, not for encoding. "
                "Make sure to provide `merges.txt` file at instantiation to enable "
                "encoding."
            )

        if self.do_lower_case:
            text = text.lower()

        text = text.split()

        split_tokens = []
        for token in text:
            if token:
                split_tokens.extend(list(self.bpe(token).split(" ")))

        return split_tokens

    def _convert_token_to_id(self, token: str) -> int:
        '''simple docstring'''
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index: int) -> str:
        '''simple docstring'''
        result = self.decoder.get(index, self.unk_token)
        return result

    def convert_tokens_to_string(self, tokens: List[str]) -> str:
        '''simple docstring'''
        string = " ".join(tokens)

        # make sure @@ tokens are concatenated
        string = "".join(string.split(BPE_TOKEN_VOCAB))

        return string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        '''simple docstring'''
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return

        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"]
        )
        merges_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"]
        )

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        if self.bpe_ranks is None:
            return (vocab_file,)

        with open(merges_file, "w", encoding="utf-8") as writer:
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merges_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!"
                    )
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1

        return (vocab_file, merges_file)
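To make the merge loop above concrete, here is a toy walk-through with made-up merge ranks (not a real `merges.txt`):

# Hypothetical BPE ranks: merge ("l", "o") first, then ("lo", "w</w>").
# bpe("low") first rewrites the token as ("l", "o", "w</w>"), then repeatedly
# merges the lowest-ranked adjacent pair:
#   ("l", "o", "w</w>") -> ("lo", "w</w>") -> ("low</w>",)
# and finally strips the "</w>" end-of-word marker, yielding "low".
print(get_pairs(("l", "o", "w</w>")))  # {('l', 'o'), ('o', 'w</w>')}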
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class DonutProcessor(ProcessorMixin):
    attributes = ["image_processor", "tokenizer"]
    image_processor_class = "AutoImageProcessor"
    tokenizer_class = "AutoTokenizer"

    def __init__(self, image_processor=None, tokenizer=None, **kwargs):
        '''simple docstring'''
        feature_extractor = None
        if "feature_extractor" in kwargs:
            warnings.warn(
                "The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"
                " instead.",
                FutureWarning,
            )
            feature_extractor = kwargs.pop("feature_extractor")

        image_processor = image_processor if image_processor is not None else feature_extractor
        if image_processor is None:
            raise ValueError("You need to specify an `image_processor`.")
        if tokenizer is None:
            raise ValueError("You need to specify a `tokenizer`.")

        super().__init__(image_processor, tokenizer)
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def __call__(self, *args, **kwargs):
        '''simple docstring'''
        if self._in_target_context_manager:
            return self.current_processor(*args, **kwargs)

        images = kwargs.pop("images", None)
        text = kwargs.pop("text", None)
        if len(args) > 0:
            images = args[0]
            args = args[1:]

        if images is None and text is None:
            raise ValueError("You need to specify either an `images` or `text` input to process.")

        if images is not None:
            inputs = self.image_processor(images, *args, **kwargs)
        if text is not None:
            encodings = self.tokenizer(text, **kwargs)

        if text is None:
            return inputs
        elif images is None:
            return encodings
        else:
            inputs["labels"] = encodings["input_ids"]
            return inputs

    def batch_decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.batch_decode(*args, **kwargs)

    def decode(self, *args, **kwargs):
        '''simple docstring'''
        return self.tokenizer.decode(*args, **kwargs)

    @contextmanager
    def as_target_processor(self):
        '''simple docstring'''
        warnings.warn(
            "`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your "
            "labels by using the argument `text` of the regular `__call__` method (either in the same call as "
            "your images inputs, or in a separate call."
        )
        self._in_target_context_manager = True
        self.current_processor = self.tokenizer
        yield
        self.current_processor = self.image_processor
        self._in_target_context_manager = False

    def token2json(self, tokens, is_inner_value=False, added_vocab=None):
        '''simple docstring'''
        if added_vocab is None:
            added_vocab = self.tokenizer.get_added_vocab()

        output = {}

        while tokens:
            start_token = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
            if start_token is None:
                break
            key = start_token.group(1)
            end_token = re.search(rf"</s_{key}>", tokens, re.IGNORECASE)
            start_token = start_token.group()
            if end_token is None:
                tokens = tokens.replace(start_token, "")
            else:
                end_token = end_token.group()
                start_token_escaped = re.escape(start_token)
                end_token_escaped = re.escape(end_token)
                content = re.search(f"{start_token_escaped}(.*?){end_token_escaped}", tokens, re.IGNORECASE)
                if content is not None:
                    content = content.group(1).strip()
                    if r"<s_" in content and r"</s_" in content:  # non-leaf node
                        value = self.token2json(content, is_inner_value=True, added_vocab=added_vocab)
                        if value:
                            if len(value) == 1:
                                value = value[0]
                            output[key] = value
                    else:  # leaf nodes
                        output[key] = []
                        for leaf in content.split(r"<sep/>"):
                            leaf = leaf.strip()
                            if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
                                leaf = leaf[1:-2]  # for categorical special tokens
                            output[key].append(leaf)
                        if len(output[key]) == 1:
                            output[key] = output[key][0]

                tokens = tokens[tokens.find(end_token) + len(end_token) :].strip()
                if tokens[:6] == r"<sep/>":  # non-leaf nodes
                    return [output] + self.token2json(tokens[6:], is_inner_value=True, added_vocab=added_vocab)

        if len(output):
            return [output] if is_inner_value else output
        else:
            return [] if is_inner_value else {"text_sequence": tokens}

    @property
    def feature_extractor_class(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.",
            FutureWarning,
        )
        return self.image_processor_class

    @property
    def feature_extractor(self):
        '''simple docstring'''
        warnings.warn(
            "`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.",
            FutureWarning,
        )
        return self.image_processor
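A small usage sketch of the token-to-JSON conversion above; `processor` is assumed to be an already-instantiated processor, and the token sequence is invented:

sequence = "<s_menu><s_name>Latte</s_name><s_price>$5</s_price></s_menu>"
print(processor.token2json(sequence))
# -> {"menu": {"name": "Latte", "price": "$5"}}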
"""simple docstring"""
import doctest
import sys
import warnings
from os.path import abspath, dirname, join
import _pytest
from transformers.testing_utils import HfDoctestModule, HfDocTestParser
# allow having multiple repository checkouts and not needing to remember to rerun
# 'pip install -e .[dev]' when switching between checkouts and running tests.
git_repo_path = abspath(join(dirname(__file__), "src"))
sys.path.insert(1, git_repo_path)
# silence FutureWarning warnings in tests since often we can't act on them until
# they become normal warnings - i.e. the tests still need to test the current functionality
warnings.simplefilter(action='''ignore''', category=FutureWarning)
def pytest_configure(config):
    """simple docstring"""
    config.addinivalue_line(
        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
    )
    config.addinivalue_line(
        "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
    )
    config.addinivalue_line("markers", "is_pipeline_test: mark test to run only when pipelines are tested")
    config.addinivalue_line("markers", "is_staging_test: mark test to run only in the staging environment")
    config.addinivalue_line("markers", "accelerate_tests: mark test that require accelerate")
    config.addinivalue_line("markers", "tool_tests: mark the tool tests that are run on their specific schedule")


def pytest_addoption(parser):
    """simple docstring"""
    from transformers.testing_utils import pytest_addoption_shared

    pytest_addoption_shared(parser)


def pytest_terminal_summary(terminalreporter):
    """simple docstring"""
    from transformers.testing_utils import pytest_terminal_summary_main

    make_reports = terminalreporter.config.getoption("--make-reports")
    if make_reports:
        pytest_terminal_summary_main(terminalreporter, id=make_reports)


def pytest_sessionfinish(session, exitstatus):
    """simple docstring"""
    # If no tests are collected, pytest exits with code 5; treat that as success.
    if exitstatus == 5:
        session.exitstatus = 0


# Doctest custom flag to ignore output.
IGNORE_RESULT = doctest.register_optionflag("IGNORE_RESULT")

OutputChecker = doctest.OutputChecker


class CustomOutputChecker(OutputChecker):
    def check_output(self, want, got, optionflags):
        if IGNORE_RESULT & optionflags:
            return True
        return OutputChecker.check_output(self, want, got, optionflags)


doctest.OutputChecker = CustomOutputChecker
_pytest.doctest.DoctestModule = HfDoctestModule
doctest.DocTestParser = HfDocTestParser
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def get_distance(x: float, y: float, max_step: int) -> float:
    """simple docstring"""
    a = x
    b = y
    for step in range(max_step):  # noqa: B007
        a_new = a * a - b * b + x
        b = 2 * a * b + y
        a = a_new

        # divergence happens for all complex number with an absolute value
        # greater than 4
        if a * a + b * b > 4:
            break
    return step / (max_step - 1)


def get_black_and_white_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return (255, 255, 255)


def get_color_coded_rgb(distance: float) -> tuple:
    """simple docstring"""
    if distance == 1:
        return (0, 0, 0)
    else:
        return tuple(round(i * 255) for i in colorsys.hsv_to_rgb(distance, 1, 1))


def get_image(
    image_width: int = 800,
    image_height: int = 600,
    figure_center_x: float = -0.6,
    figure_center_y: float = 0,
    figure_width: float = 3.2,
    max_step: int = 50,
    use_distance_color_coding: bool = True,
) -> Image.Image:
    """simple docstring"""
    img = Image.new("RGB", (image_width, image_height))
    pixels = img.load()

    # loop through the image-coordinates
    for image_x in range(image_width):
        for image_y in range(image_height):
            # determine the figure-coordinates based on the image-coordinates
            figure_height = figure_width / image_width * image_height
            figure_x = figure_center_x + (image_x / image_width - 0.5) * figure_width
            figure_y = figure_center_y + (image_y / image_height - 0.5) * figure_height

            distance = get_distance(figure_x, figure_y, max_step)

            # color the corresponding pixel based on the selected coloring-function
            if use_distance_color_coding:
                pixels[image_x, image_y] = get_color_coded_rgb(distance)
            else:
                pixels[image_x, image_y] = get_black_and_white_rgb(distance)

    return img


if __name__ == "__main__":
    import doctest

    doctest.testmod()

    # colored version, full figure
    img = get_image()

    # uncomment for colored version, different section, zoomed in
    # img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
    # figure_width = 0.8)

    # uncomment for black and white version, full figure
    # img = get_image(use_distance_color_coding = False)

    # uncomment to save the image
    # img.save("mandelbrot.png")

    img.show()
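Two sanity checks that follow directly from the escape-time loop above: points inside the set never diverge and normalize to 1.0 (rendered black), while far-away points escape on the first step and normalize to 0.0:

assert get_distance(0, 0, 50) == 1.0  # the origin never escapes
assert get_distance(2, 2, 50) == 0.0  # escapes on the very first iteration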
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
_import_structure = {
"configuration_mega": ["MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP", "MegaConfig", "MegaOnnxConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mega"] = [
"MEGA_PRETRAINED_MODEL_ARCHIVE_LIST",
"MegaForCausalLM",
"MegaForMaskedLM",
"MegaForMultipleChoice",
"MegaForQuestionAnswering",
"MegaForSequenceClassification",
"MegaForTokenClassification",
"MegaModel",
"MegaPreTrainedModel",
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_2d import Transformer2DModel, Transformer2DModelOutput


class DualTransformer2DModel(nn.Module):
    def __init__(
        self,
        num_attention_heads: int = 16,
        attention_head_dim: int = 88,
        in_channels: Optional[int] = None,
        num_layers: int = 1,
        dropout: float = 0.0,
        norm_num_groups: int = 32,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        sample_size: Optional[int] = None,
        num_vector_embeds: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
    ):
        '''simple docstring'''
        super().__init__()
        self.transformers = nn.ModuleList(
            [
                Transformer2DModel(
                    num_attention_heads=num_attention_heads,
                    attention_head_dim=attention_head_dim,
                    in_channels=in_channels,
                    num_layers=num_layers,
                    dropout=dropout,
                    norm_num_groups=norm_num_groups,
                    cross_attention_dim=cross_attention_dim,
                    attention_bias=attention_bias,
                    sample_size=sample_size,
                    num_vector_embeds=num_vector_embeds,
                    activation_fn=activation_fn,
                    num_embeds_ada_norm=num_embeds_ada_norm,
                )
                for _ in range(2)
            ]
        )

        # Variables that can be set by a pipeline:

        # The ratio of transformer1 to transformer2's output states to be combined during inference
        self.mix_ratio = 0.5

        # The shape of `encoder_hidden_states` is expected to be
        # `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
        self.condition_lengths = [77, 257]

        # Which transformer to use to encode which condition.
        # E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
        self.transformer_index_for_condition = [1, 0]

    def forward(
        self,
        hidden_states,
        encoder_hidden_states,
        timestep=None,
        attention_mask=None,
        cross_attention_kwargs=None,
        return_dict: bool = True,
    ):
        '''simple docstring'''
        input_states = hidden_states

        encoded_states = []
        tokens_start = 0
        # attention_mask is not used yet
        for i in range(2):
            # for each of the two transformers, pass the corresponding condition tokens
            condition_state = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
            transformer_index = self.transformer_index_for_condition[i]
            encoded_state = self.transformers[transformer_index](
                hidden_states,
                encoder_hidden_states=condition_state,
                timestep=timestep,
                cross_attention_kwargs=cross_attention_kwargs,
                return_dict=False,
            )[0]
            encoded_states.append(encoded_state - input_states)
            tokens_start += self.condition_lengths[i]

        output_states = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
        output_states = output_states + input_states

        if not return_dict:
            return (output_states,)

        return Transformer2DModelOutput(sample=output_states)
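The blend at the end of `forward` is a plain convex combination of the two transformers' residuals; a minimal numeric sketch with stand-in tensors (no real transformers involved):

import torch

input_states = torch.zeros(1, 4)
residual_a = torch.full((1, 4), 2.0)  # stand-in for encoded_states[0]
residual_b = torch.full((1, 4), 4.0)  # stand-in for encoded_states[1]
mix_ratio = 0.5
output_states = residual_a * mix_ratio + residual_b * (1 - mix_ratio) + input_states
print(output_states)  # tensor([[3., 3., 3., 3.]])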
"""simple docstring"""
import argparse
import os
from transformers.utils import direct_transformers_import
# All paths are set with the intent you should run this script from the root of the repo with the command
# python utils/check_task_guides.py
TRANSFORMERS_PATH = "src/transformers"
PATH_TO_TASK_GUIDES = "docs/source/en/tasks"
def _find_text_in_file(filename, start_prompt, end_prompt):
    """simple docstring"""
    with open(filename, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()
    # Find the start prompt.
    start_index = 0
    while not lines[start_index].startswith(start_prompt):
        start_index += 1
    start_index += 1

    end_index = start_index
    while not lines[end_index].startswith(end_prompt):
        end_index += 1
    end_index -= 1

    while len(lines[start_index]) <= 1:
        start_index += 1
    while len(lines[end_index]) <= 1:
        end_index -= 1
    end_index += 1
    return "".join(lines[start_index:end_index]), start_index, end_index, lines
# This is to make sure the transformers module imported is the one in the repo.
transformers_module = direct_transformers_import(TRANSFORMERS_PATH)

TASK_GUIDE_TO_MODELS = {
'asr.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CTC_MAPPING_NAMES,
'audio_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_AUDIO_CLASSIFICATION_MAPPING_NAMES,
'language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_CAUSAL_LM_MAPPING_NAMES,
'image_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_IMAGE_CLASSIFICATION_MAPPING_NAMES,
'masked_language_modeling.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MASKED_LM_MAPPING_NAMES,
'multiple_choice.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_MULTIPLE_CHOICE_MAPPING_NAMES,
'object_detection.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_OBJECT_DETECTION_MAPPING_NAMES,
'question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_QUESTION_ANSWERING_MAPPING_NAMES,
'semantic_segmentation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEMANTIC_SEGMENTATION_MAPPING_NAMES,
'sequence_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING_NAMES,
'summarization.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'token_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING_NAMES,
'translation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING_NAMES,
'video_classification.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_VIDEO_CLASSIFICATION_MAPPING_NAMES,
'document_question_answering.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DOCUMENT_QUESTION_ANSWERING_MAPPING_NAMES,
'monocular_depth_estimation.md': transformers_module.models.auto.modeling_auto.MODEL_FOR_DEPTH_ESTIMATION_MAPPING_NAMES,
}
# This list contains model types used in some task guides that are not in `CONFIG_MAPPING_NAMES` (therefore not in any
# `MODEL_MAPPING_NAMES` or any `MODEL_FOR_XXX_MAPPING_NAMES`).
SPECIAL_TASK_GUIDE_TO_MODEL_TYPES = {
'summarization.md': ('nllb',),
'translation.md': ('nllb',),
}
def get_model_list_for_task(task_guide):
    """simple docstring"""
    model_mapping_names = TASK_GUIDE_TO_MODELS[task_guide]
    special_model_types = SPECIAL_TASK_GUIDE_TO_MODEL_TYPES.get(task_guide, set())
    model_names = {
        code: name
        for code, name in transformers_module.MODEL_NAMES_MAPPING.items()
        if (code in model_mapping_names or code in special_model_types)
    }
    return ", ".join([f"[{name}](../model_doc/{code})" for code, name in model_names.items()]) + "\n"


def check_model_list_for_task(task_guide, overwrite=False):
    """simple docstring"""
    current_list, start_index, end_index, lines = _find_text_in_file(
        filename=os.path.join(PATH_TO_TASK_GUIDES, task_guide),
        start_prompt="<!--This tip is automatically generated by `make fix-copies`, do not fill manually!-->",
        end_prompt="<!--End of the generated tip-->",
    )

    new_list = get_model_list_for_task(task_guide)

    if current_list != new_list:
        if overwrite:
            with open(os.path.join(PATH_TO_TASK_GUIDES, task_guide), "w", encoding="utf-8", newline="\n") as f:
                f.writelines(lines[:start_index] + [new_list] + lines[end_index:])
        else:
            raise ValueError(
                f"The list of models that can be used in the {task_guide} guide needs an update. Run `make fix-copies`"
                " to fix this."
            )


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--fix_and_overwrite", action="store_true", help="Whether to fix inconsistencies.")
    args = parser.parse_args()

    for task_guide in TASK_GUIDE_TO_MODELS.keys():
        check_model_list_for_task(task_guide, args.fix_and_overwrite)
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def convert_tf_checkpoint_to_pytorch(tf_checkpoint_path, config_file, pytorch_dump_path):
    """simple docstring"""
    # Initialise the PyTorch model
    config = LxmertConfig.from_json_file(config_file)
    print(f"Building PyTorch model from configuration: {config}")
    model = LxmertForPreTraining(config)

    # Load weights from tf checkpoint
    load_tf_weights_in_lxmert(model, config, tf_checkpoint_path)

    # Save pytorch-model
    print(f"Save PyTorch model to {pytorch_dump_path}")
    torch.save(model.state_dict(), pytorch_dump_path)


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    # Required parameters
    parser.add_argument(
        "--tf_checkpoint_path", default=None, type=str, required=True, help="Path to the TensorFlow checkpoint path."
    )
    parser.add_argument(
        "--config_file",
        default=None,
        type=str,
        required=True,
        help="The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.",
    )
    parser.add_argument(
        "--pytorch_dump_path", default=None, type=str, required=True, help="Path to the output PyTorch model."
    )
    args = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import argparse
import json
import os
import evaluate
import torch
from datasets import load_dataset
from torch.optim import AdamW
from torch.utils.data import DataLoader
from transformers import AutoModelForSequenceClassification, AutoTokenizer, get_linear_schedule_with_warmup, set_seed
from accelerate import Accelerator, DistributedType
from accelerate.utils.deepspeed import DummyOptim, DummyScheduler
MAX_GPU_BATCH_SIZE = 16
EVAL_BATCH_SIZE = 32


def get_dataloaders(accelerator: Accelerator, batch_size: int = 16, model_name: str = "bert-base-cased"):
    """simple docstring"""
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    datasets = load_dataset("glue", "mrpc")

    def tokenize_function(examples):
        # max_length=None => use the model max length (it's actually the default)
        outputs = tokenizer(examples["sentence1"], examples["sentence2"], truncation=True, max_length=None)
        return outputs

    # Apply the method we just defined to all the examples in all the splits of the dataset
    tokenized_datasets = datasets.map(
        tokenize_function, batched=True, remove_columns=["idx", "sentence1", "sentence2"], load_from_cache_file=False
    )

    # We also rename the 'label' column to 'labels' which is the expected name for labels by the models of the
    # transformers library
    tokenized_datasets = tokenized_datasets.rename_column("label", "labels")

    def collate_fn(examples):
        # On TPU it's best to pad everything to the same length or training will be very slow.
        if accelerator.distributed_type == DistributedType.TPU:
            return tokenizer.pad(examples, padding="max_length", max_length=128, return_tensors="pt")
        return tokenizer.pad(examples, padding="longest", return_tensors="pt")

    # Instantiate dataloaders.
    train_dataloader = DataLoader(
        tokenized_datasets["train"], shuffle=True, collate_fn=collate_fn, batch_size=batch_size
    )
    eval_dataloader = DataLoader(
        tokenized_datasets["validation"], shuffle=False, collate_fn=collate_fn, batch_size=batch_size
    )

    return train_dataloader, eval_dataloader


def evaluation_loop(accelerator, eval_dataloader, model, metric):
    """simple docstring"""
    model.eval()
    samples_seen = 0
    for step, batch in enumerate(eval_dataloader):
        # We could avoid this line since we set the accelerator with `device_placement=True`.
        batch.to(accelerator.device)
        with torch.no_grad():
            outputs = model(**batch)
        predictions = outputs.logits.argmax(dim=-1)
        # It is slightly faster to call this once, than multiple times
        predictions, references = accelerator.gather(
            (predictions, batch["labels"])
        )  # If we are in a multiprocess environment, the last batch has duplicates
        if accelerator.use_distributed:
            if step == len(eval_dataloader) - 1:
                predictions = predictions[: len(eval_dataloader.dataset) - samples_seen]
                references = references[: len(eval_dataloader.dataset) - samples_seen]
            else:
                samples_seen += references.shape[0]
        metric.add_batch(
            predictions=predictions,
            references=references,
        )

    eval_metric = metric.compute()
    return eval_metric["accuracy"]


def training_function(config, args):
    """simple docstring"""
    accelerator = Accelerator()
    # Sample hyper-parameters for learning rate, batch size, seed and a few other HPs
    lr = config["lr"]
    num_epochs = int(config["num_epochs"])
    seed = int(config["seed"])
    batch_size = int(config["batch_size"])
    model_name = args.model_name_or_path

    set_seed(seed)
    train_dataloader, eval_dataloader = get_dataloaders(accelerator, batch_size, model_name)

    # Instantiate the model (we build the model here so that the seed also control new weights initialization)
    model = AutoModelForSequenceClassification.from_pretrained(model_name, return_dict=True)

    # Instantiate optimizer
    optimizer_cls = (
        AdamW
        if accelerator.state.deepspeed_plugin is None
        or "optimizer" not in accelerator.state.deepspeed_plugin.deepspeed_config
        else DummyOptim
    )
    optimizer = optimizer_cls(params=model.parameters(), lr=lr)

    if accelerator.state.deepspeed_plugin is not None:
        gradient_accumulation_steps = accelerator.state.deepspeed_plugin.deepspeed_config[
            "gradient_accumulation_steps"
        ]
    else:
        gradient_accumulation_steps = 1
    max_training_steps = (len(train_dataloader) * num_epochs) // gradient_accumulation_steps

    # Instantiate scheduler
    if (
        accelerator.state.deepspeed_plugin is None
        or "scheduler" not in accelerator.state.deepspeed_plugin.deepspeed_config
    ):
        lr_scheduler = get_linear_schedule_with_warmup(
            optimizer=optimizer,
            num_warmup_steps=0,
            num_training_steps=max_training_steps,
        )
    else:
        lr_scheduler = DummyScheduler(optimizer, total_num_steps=max_training_steps, warmup_num_steps=0)

    # Prepare everything
    # There is no specific order to remember, we just need to unpack the objects in the same order we gave them to the
    # prepare method.
    model, optimizer, train_dataloader, eval_dataloader, lr_scheduler = accelerator.prepare(
        model, optimizer, train_dataloader, eval_dataloader, lr_scheduler
    )

    # We need to keep track of how many total steps we have iterated over
    overall_step = 0
    # We also need to keep track of the starting epoch so files are named properly
    starting_epoch = 0
    metric = evaluate.load("glue", "mrpc")
    ending_epoch = num_epochs

    if args.partial_train_epoch is not None:
        ending_epoch = args.partial_train_epoch

    if args.resume_from_checkpoint:
        accelerator.load_state(args.resume_from_checkpoint)
        epoch_string = args.resume_from_checkpoint.split("epoch_")[1]
        state_epoch_num = ""
        for char in epoch_string:
            if char.isdigit():
                state_epoch_num += char
            else:
                break
        starting_epoch = int(state_epoch_num) + 1
        accuracy = evaluation_loop(accelerator, eval_dataloader, model, metric)
        accelerator.print("resumed checkpoint performance:", accuracy)
        accelerator.print("resumed checkpoint's scheduler's lr:", lr_scheduler.get_lr()[0])
        accelerator.print("resumed optimizers's lr:", optimizer.param_groups[0]["lr"])
        with open(os.path.join(args.output_dir, f"state_{starting_epoch-1}.json"), "r") as f:
            resumed_state = json.load(f)
        assert resumed_state["accuracy"] == accuracy, "Accuracy mismatch, loading from checkpoint failed"
        assert (
            resumed_state["lr"] == lr_scheduler.get_lr()[0]
        ), "Scheduler learning rate mismatch, loading from checkpoint failed"
        assert (
            resumed_state["optimizer_lr"] == optimizer.param_groups[0]["lr"]
        ), "Optimizer learning rate mismatch, loading from checkpoint failed"
        assert resumed_state["epoch"] == starting_epoch - 1, "Epoch mismatch, loading from checkpoint failed"
        return

    # Now we train the model
    state = {}
    for epoch in range(starting_epoch, ending_epoch):
        model.train()
        for step, batch in enumerate(train_dataloader):
            outputs = model(**batch)
            loss = outputs.loss
            loss = loss / gradient_accumulation_steps
            accelerator.backward(loss)
            if step % gradient_accumulation_steps == 0:
                optimizer.step()
                lr_scheduler.step()
                optimizer.zero_grad()

            overall_step += 1

        output_dir = f"epoch_{epoch}"
        output_dir = os.path.join(args.output_dir, output_dir)
        accelerator.save_state(output_dir)
        accuracy = evaluation_loop(accelerator, eval_dataloader, model, metric)
        state["accuracy"] = accuracy
        state["lr"] = lr_scheduler.get_lr()[0]
        state["optimizer_lr"] = optimizer.param_groups[0]["lr"]
        state["epoch"] = epoch
        state["overall_step"] = overall_step
        accelerator.print(f"epoch {epoch}:", state)

        accelerator.wait_for_everyone()
        if accelerator.is_main_process:
            with open(os.path.join(args.output_dir, f"state_{epoch}.json"), "w") as f:
                json.dump(state, f)


def main():
    """simple docstring"""
    parser = argparse.ArgumentParser(description="Simple example of training script tracking peak GPU memory usage.")
    parser.add_argument(
        "--model_name_or_path",
        type=str,
        default="bert-base-cased",
        help="Path to pretrained model or model identifier from huggingface.co/models.",
        required=False,
    )
    parser.add_argument(
        "--output_dir",
        type=str,
        default=".",
        help="Optional save directory where all checkpoint folders will be stored. Default is the current working directory.",
    )
    parser.add_argument(
        "--resume_from_checkpoint",
        type=str,
        default=None,
        help="If the training should continue from a checkpoint folder.",
    )
    parser.add_argument(
        "--partial_train_epoch",
        type=int,
        default=None,
        help="If passed, the training will stop after this number of epochs.",
    )
    parser.add_argument(
        "--num_epochs",
        type=int,
        default=2,
        help="Number of train epochs.",
    )
    args = parser.parse_args()
    config = {"lr": 2e-5, "num_epochs": args.num_epochs, "seed": 42, "batch_size": 16}

    training_function(config, args)


if __name__ == "__main__":
    main()
"""simple docstring"""
import argparse
import os
import re
import packaging.version
PATH_TO_EXAMPLES = "examples/"
REPLACE_PATTERNS = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
REPLACE_FILES = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
README_FILE = "README.md"


def update_version_in_file(fname, version, pattern):
    """simple docstring"""
    with open(fname, "r", encoding="utf-8", newline="\n") as f:
        code = f.read()
    re_pattern, replace = REPLACE_PATTERNS[pattern]
    replace = replace.replace("VERSION", version)
    code = re_pattern.sub(replace, code)
    with open(fname, "w", encoding="utf-8", newline="\n") as f:
        f.write(code)


def update_version_in_examples(version):
    """simple docstring"""
    for folder, directories, fnames in os.walk(PATH_TO_EXAMPLES):
        # Removing some of the folders with non-actively maintained examples from the walk
        if "research_projects" in directories:
            directories.remove("research_projects")
        if "legacy" in directories:
            directories.remove("legacy")
        for fname in fnames:
            if fname.endswith(".py"):
                update_version_in_file(os.path.join(folder, fname), version, pattern="examples")


def global_version_update(version, patch=False):
    """simple docstring"""
    for pattern, fname in REPLACE_FILES.items():
        update_version_in_file(fname, version, pattern)
    if not patch:
        update_version_in_examples(version)


def clean_main_ref_in_model_list():
    """simple docstring"""
    _start_prompt = "🤗 Transformers currently provides the following architectures"
    _end_prompt = "1. Want to contribute a new model?"
    with open(README_FILE, "r", encoding="utf-8", newline="\n") as f:
        lines = f.readlines()

    # Find the start of the list.
    start_index = 0
    while not lines[start_index].startswith(_start_prompt):
        start_index += 1
    start_index += 1

    index = start_index
    # Update the lines in the model list.
    while not lines[index].startswith(_end_prompt):
        if lines[index].startswith("1."):
            lines[index] = lines[index].replace(
                "https://huggingface.co/docs/diffusers/main/model_doc",
                "https://huggingface.co/docs/diffusers/model_doc",
            )
        index += 1

    with open(README_FILE, "w", encoding="utf-8", newline="\n") as f:
        f.writelines(lines)


def get_version():
    """simple docstring"""
    with open(REPLACE_FILES["init"], "r") as f:
        code = f.read()
    default_version = REPLACE_PATTERNS["init"][0].search(code).groups()[0]
    return packaging.version.parse(default_version)


def pre_release_work(patch=False):
    """simple docstring"""
    default_version = get_version()
    if patch and default_version.is_devrelease:
        raise ValueError("Can't create a patch version from the dev branch, checkout a released version!")
    if default_version.is_devrelease:
        default_version = default_version.base_version
    elif patch:
        default_version = f"{default_version.major}.{default_version.minor}.{default_version.micro + 1}"
    else:
        default_version = f"{default_version.major}.{default_version.minor + 1}.0"

    # Now let's ask nicely if that's the right one.
    version = input(f"Which version are you releasing? [{default_version}]")
    if len(version) == 0:
        version = default_version

    print(f"Updating version to {version}.")
    global_version_update(version, patch=patch)


def post_release_work():
    """simple docstring"""
    current_version = get_version()
    dev_version = f"{current_version.major}.{current_version.minor + 1}.0.dev0"
    current_version = current_version.base_version

    # Check with the user we got that right.
    version = input(f"Which version are we developing now? [{dev_version}]")
    if len(version) == 0:
        version = dev_version

    print(f"Updating version to {version}.")
    global_version_update(version)
    # print("Cleaning main README, don't forget to run `make fix-copies`.")
    # clean_main_ref_in_model_list()


if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--post_release", action="store_true", help="Whether this is pre or post release.")
    parser.add_argument("--patch", action="store_true", help="Whether or not this is a patch release.")
    args = parser.parse_args()
    if not args.post_release:
        pre_release_work(patch=args.patch)
    elif args.patch:
        print("Nothing to do after a patch :-)")
    else:
        post_release_work()
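For illustration, the version-rewrite machinery above can be exercised directly on a toy input string:

pattern, replace = REPLACE_PATTERNS["init"]
print(pattern.sub(replace.replace("VERSION", "0.19.0"), '__version__ = "0.18.2"'))
# -> __version__ = "0.19.0"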
"""simple docstring"""
import dataclasses
import json
import sys
import types
from argparse import ArgumentDefaultsHelpFormatter, ArgumentParser, ArgumentTypeError
from copy import copy
from enum import Enum
from inspect import isclass
from pathlib import Path
from typing import Any, Callable, Dict, Iterable, List, Literal, NewType, Optional, Tuple, Union, get_type_hints
import yaml
DataClass = NewType("DataClass", Any)
DataClassType = NewType("DataClassType", Any)


def string_to_bool(v):
    """simple docstring"""
    if isinstance(v, bool):
        return v
    if v.lower() in ("yes", "true", "t", "y", "1"):
        return True
    elif v.lower() in ("no", "false", "f", "n", "0"):
        return False
    else:
        raise ArgumentTypeError(
            f"Truthy value expected: got {v} but expected one of yes/no, true/false, t/f, y/n, 1/0 (case insensitive)."
        )


def make_choice_type_function(choices: list) -> Callable[[str], Any]:
    """simple docstring"""
    str_to_choice = {str(choice): choice for choice in choices}
    return lambda arg: str_to_choice.get(arg, arg)


def HfArg(
    *,
    aliases: Union[str, List[str]] = None,
    help: str = None,
    default: Any = dataclasses.MISSING,
    default_factory: Callable[[], Any] = dataclasses.MISSING,
    metadata: dict = None,
    **kwargs,
) -> dataclasses.Field:
    """simple docstring"""
    if metadata is None:
        # Important, don't use as default param in function signature because dict is mutable and shared across function calls
        metadata = {}
    if aliases is not None:
        metadata["aliases"] = aliases
    if help is not None:
        metadata["help"] = help

    return dataclasses.field(metadata=metadata, default=default, default_factory=default_factory, **kwargs)


class HfArgumentParser(ArgumentParser):
    dataclass_types: Iterable[DataClassType]

    def __init__(self, dataclass_types: Union[DataClassType, Iterable[DataClassType]], **kwargs):
        '''simple docstring'''
        # To make the default appear when using --help
        if "formatter_class" not in kwargs:
            kwargs["formatter_class"] = ArgumentDefaultsHelpFormatter
        super().__init__(**kwargs)
        if dataclasses.is_dataclass(dataclass_types):
            dataclass_types = [dataclass_types]
        self.dataclass_types = list(dataclass_types)
        for dtype in self.dataclass_types:
            self._add_dataclass_arguments(dtype)

    @staticmethod
    def _parse_dataclass_field(parser: ArgumentParser, field: dataclasses.Field):
        '''simple docstring'''
        field_name = f"--{field.name}"
        kwargs = field.metadata.copy()
        # field.metadata is not used at all by Data Classes,
        # it is provided as a third-party extension mechanism.
        if isinstance(field.type, str):
            raise RuntimeError(
                "Unresolved type detected, which should have been done with the help of "
                "`typing.get_type_hints` method by default"
            )

        aliases = kwargs.pop("aliases", [])
        if isinstance(aliases, str):
            aliases = [aliases]

        origin_type = getattr(field.type, "__origin__", field.type)
        if origin_type is Union or (hasattr(types, "UnionType") and isinstance(origin_type, types.UnionType)):
            if str not in field.type.__args__ and (
                len(field.type.__args__) != 2 or type(None) not in field.type.__args__
            ):
                raise ValueError(
                    "Only `Union[X, NoneType]` (i.e., `Optional[X]`) is allowed for `Union` because"
                    " the argument parser only supports one type per argument."
                    f" Problem encountered in field '{field.name}'."
                )
            if type(None) not in field.type.__args__:
                # filter `str` in Union
                field.type = field.type.__args__[0] if field.type.__args__[1] == str else field.type.__args__[1]
                origin_type = getattr(field.type, "__origin__", field.type)
            elif bool not in field.type.__args__:
                # filter `NoneType` in Union (except for `Union[bool, NoneType]`)
                field.type = (
                    field.type.__args__[0] if isinstance(None, field.type.__args__[1]) else field.type.__args__[1]
                )
                origin_type = getattr(field.type, "__origin__", field.type)

        # A variable to store kwargs for a boolean field, if needed
        # so that we can init a `no_*` complement argument (see below)
        bool_kwargs = {}
        if origin_type is Literal or (isinstance(field.type, type) and issubclass(field.type, Enum)):
            if origin_type is Literal:
                kwargs["choices"] = field.type.__args__
            else:
                kwargs["choices"] = [x.value for x in field.type]

            kwargs["type"] = make_choice_type_function(kwargs["choices"])

            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            else:
                kwargs["required"] = True
        elif field.type is bool or field.type == Optional[bool]:
            # Copy the currect kwargs to use to instantiate a `no_*` complement argument below.
            # We do not initialize it here because the `no_*` alternative must be instantiated after the real argument
            bool_kwargs = copy(kwargs)

            # Hack because type=bool in argparse does not behave as we want.
            kwargs["type"] = string_to_bool
            if field.type is bool or (field.default is not None and field.default is not dataclasses.MISSING):
                # Default value is False if we have no default when of type bool.
                default = False if field.default is dataclasses.MISSING else field.default
                # This is the value that will get picked if we don't include --field_name in any way
                kwargs["default"] = default
                # This tells argparse we accept 0 or 1 value after --field_name
                kwargs["nargs"] = "?"
                # This is the value that will get picked if we do --field_name (without value)
                kwargs["const"] = True
        elif isclass(origin_type) and issubclass(origin_type, list):
            kwargs["type"] = field.type.__args__[0]
            kwargs["nargs"] = "+"
            if field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            elif field.default is dataclasses.MISSING:
                kwargs["required"] = True
        else:
            kwargs["type"] = field.type
            if field.default is not dataclasses.MISSING:
                kwargs["default"] = field.default
            elif field.default_factory is not dataclasses.MISSING:
                kwargs["default"] = field.default_factory()
            else:
                kwargs["required"] = True
        parser.add_argument(field_name, *aliases, **kwargs)

        # Add a complement `no_*` argument for a boolean field AFTER the initial field has already been added.
        # Order is important for arguments with the same destination!
        # We use a copy of earlier kwargs because the original kwargs have changed a lot before reaching down
        # here and we do not need those changes/additional keys.
        if field.default is True and (field.type is bool or field.type == Optional[bool]):
            bool_kwargs["default"] = False
            parser.add_argument(f"--no_{field.name}", action="store_false", dest=field.name, **bool_kwargs)

    def _add_dataclass_arguments(self, dtype: DataClassType):
        '''simple docstring'''
        if hasattr(dtype, "_argument_group_name"):
            parser = self.add_argument_group(dtype._argument_group_name)
        else:
            parser = self

        try:
            type_hints: Dict[str, type] = get_type_hints(dtype)
        except NameError:
            raise RuntimeError(
                f"Type resolution failed for {dtype}. Try declaring the class in global scope or "
                "removing line of `from __future__ import annotations` which opts in Postponed "
                "Evaluation of Annotations (PEP 563)"
            )
        except TypeError as ex:
            # Remove this block when we drop Python 3.9 support
            if sys.version_info[:2] < (3, 10) and "unsupported operand type(s) for |" in str(ex):
                python_version = ".".join(map(str, sys.version_info[:3]))
                raise RuntimeError(
                    f"Type resolution failed for {dtype} on Python {python_version}. Try removing "
                    "line of `from __future__ import annotations` which opts in union types as "
                    "`X | Y` (PEP 604) via Postponed Evaluation of Annotations (PEP 563). To "
                    "support Python versions that lower than 3.10, you need to use "
                    "`typing.Union[X, Y]` instead of `X | Y` and `typing.Optional[X]` instead of "
                    "`X | None`."
                ) from ex
            raise

        for field in dataclasses.fields(dtype):
            if not field.init:
                continue
            field.type = type_hints[field.name]
            self._parse_dataclass_field(parser, field)

    def parse_args_into_dataclasses(
        self,
        args=None,
        return_remaining_strings=False,
        look_for_args_file=True,
        args_filename=None,
        args_file_flag=None,
    ) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        if args_file_flag or args_filename or (look_for_args_file and len(sys.argv)):
            args_files = []

            if args_filename:
                args_files.append(Path(args_filename))
            elif look_for_args_file and len(sys.argv):
                args_files.append(Path(sys.argv[0]).with_suffix(".args"))

            # args files specified via command line flag should overwrite default args files so we add them last
            if args_file_flag:
                # Create special parser just to extract the args_file_flag values
                args_file_parser = ArgumentParser()
                args_file_parser.add_argument(args_file_flag, type=str, action="append")

                # Use only remaining args for further parsing (remove the args_file_flag)
                cfg, args = args_file_parser.parse_known_args(args=args)
                cmd_args_file_paths = vars(cfg).get(args_file_flag.lstrip("-"), None)

                if cmd_args_file_paths:
                    args_files.extend([Path(p) for p in cmd_args_file_paths])

            file_args = []
            for args_file in args_files:
                if args_file.exists():
                    file_args += args_file.read_text().split()

            # in case of duplicate arguments the last one has precedence
            # args specified via the command line should overwrite args from files, so we add them last
            args = file_args + args if args is not None else file_args + sys.argv[1:]
        namespace, remaining_args = self.parse_known_args(args=args)
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in vars(namespace).items() if k in keys}
            for k in keys:
                delattr(namespace, k)
            obj = dtype(**inputs)
            outputs.append(obj)
        if len(namespace.__dict__) > 0:
            # additional namespace.
            outputs.append(namespace)
        if return_remaining_strings:
            return (*outputs, remaining_args)
        else:
            if remaining_args:
                raise ValueError(f"Some specified arguments are not used by the HfArgumentParser: {remaining_args}")

            return (*outputs,)

    def parse_dict(self, args: Dict[str, Any], allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        unused_keys = set(args.keys())
        outputs = []
        for dtype in self.dataclass_types:
            keys = {f.name for f in dataclasses.fields(dtype) if f.init}
            inputs = {k: v for k, v in args.items() if k in keys}
            unused_keys.difference_update(inputs.keys())
            obj = dtype(**inputs)
            outputs.append(obj)
        if not allow_extra_keys and unused_keys:
            raise ValueError(f"Some keys are not used by the HfArgumentParser: {sorted(unused_keys)}")
        return tuple(outputs)

    def parse_json_file(self, json_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        with open(Path(json_file), encoding="utf-8") as open_json_file:
            data = json.loads(open_json_file.read())
        outputs = self.parse_dict(data, allow_extra_keys=allow_extra_keys)
        return tuple(outputs)

    def parse_yaml_file(self, yaml_file: str, allow_extra_keys: bool = False) -> Tuple[DataClass, ...]:
        '''simple docstring'''
        outputs = self.parse_dict(yaml.safe_load(Path(yaml_file).read_text()), allow_extra_keys=allow_extra_keys)
        return tuple(outputs)
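A minimal end-to-end sketch of the parser above; the dataclass and the flags are hypothetical, chosen only to exercise the string, float, and boolean code paths:

from dataclasses import dataclass


@dataclass
class ExampleArguments:
    model_name: str = HfArg(default="bert-base-cased", help="Model identifier.")
    learning_rate: float = 3e-5
    do_train: bool = False


example_parser = HfArgumentParser(ExampleArguments)
(example_args,) = example_parser.parse_args_into_dataclasses(args=["--learning_rate", "1e-4", "--do_train"])
print(example_args.learning_rate, example_args.do_train)  # 0.0001 True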
"""simple docstring"""
from __future__ import annotations
def generate_all_permutations(sequence: list[int | str]) -> None:
    """simple docstring"""
    create_state_space_tree(sequence, [], 0, [0 for i in range(len(sequence))])


def create_state_space_tree(
    sequence: list[int | str],
    current_sequence: list[int | str],
    index: int,
    index_used: list[int],
) -> None:
    """simple docstring"""
    if index == len(sequence):
        print(current_sequence)
        return

    for i in range(len(sequence)):
        if not index_used[i]:
            current_sequence.append(sequence[i])
            index_used[i] = True
            create_state_space_tree(sequence, current_sequence, index + 1, index_used)
            current_sequence.pop()
            index_used[i] = False


sequence: list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)

sequence_a: list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
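The state-space tree explored above has n! leaves, so `[3, 1, 2, 4]` prints 24 permutations; a quick cross-check against the standard library:

import itertools

print(len(list(itertools.permutations([3, 1, 2, 4]))))  # 24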
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class RoFormerTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = RoFormerTokenizer
    rust_tokenizer_class = RoFormerTokenizerFast
    space_between_special_tokens = True
    test_rust_tokenizer = True

    def setUp(self):
        super().setUp()

    def get_tokenizer(self, **kwargs):
        return self.tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_rust_tokenizer(self, **kwargs):
        return self.rust_tokenizer_class.from_pretrained("junnyu/roformer_chinese_base", **kwargs)

    def get_chinese_input_output_texts(self):
        input_text = "永和服装饰品有限公司,今天天气非常好"
        output_text = "永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"
        return input_text, output_text

    def test_tokenizer(self):
        tokenizer = self.get_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    def test_rust_tokenizer(self):
        tokenizer = self.get_rust_tokenizer()
        input_text, output_text = self.get_chinese_input_output_texts()
        tokens = tokenizer.tokenize(input_text)
        self.assertListEqual(tokens, output_text.split())

        input_tokens = tokens + [tokenizer.unk_token]
        exp_tokens = [22943, 21332, 34431, 45904, 117, 306, 1231, 1231, 2653, 33994, 1266, 100]
        self.assertListEqual(tokenizer.convert_tokens_to_ids(input_tokens), exp_tokens)

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer(self):
        pass

    # can't train new_tokenizer via Tokenizers lib
    def test_training_new_tokenizer_with_special_tokens_change(self):
        pass

    def test_save_slow_from_fast_and_reload_fast(self):
        pass
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class DPTModelTester:
    def __init__(self, parent, batch_size=2, image_size=32, patch_size=16, num_channels=3, is_training=True, use_labels=True, hidden_size=32, num_hidden_layers=4, backbone_out_indices=[0, 1, 2, 3], num_attention_heads=4, intermediate_size=37, hidden_act="gelu", hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, initializer_range=0.02, num_labels=3, backbone_featmap_shape=[1, 384, 24, 24], is_hybrid=True, scope=None):
        self.parent = parent
        self.batch_size = batch_size
        self.image_size = image_size
        self.patch_size = patch_size
        self.num_channels = num_channels
        self.is_training = is_training
        self.use_labels = use_labels
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.backbone_out_indices = backbone_out_indices
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.backbone_featmap_shape = backbone_featmap_shape
        self.scope = scope
        self.is_hybrid = is_hybrid
        # sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
        self.num_patches = (image_size // patch_size) ** 2
        self.seq_length = self.num_patches + 1

    def prepare_config_and_inputs(self):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size])

        labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size, self.image_size, self.image_size], self.num_labels)

        config = self.get_config()

        return config, pixel_values, labels

    def get_config(self):
        # BiT-style backbone config used by the DPT-hybrid variant
        backbone_config = {
            "global_padding": "same",
            "layer_type": "bottleneck",
            "depths": [3, 4, 9],
            "out_features": ["stage1", "stage2", "stage3"],
            "embedding_dynamic_padding": True,
            "hidden_sizes": [96, 192, 384, 768],
            "num_groups": 2,
        }
        return DPTConfig(
            image_size=self.image_size, patch_size=self.patch_size, num_channels=self.num_channels, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, backbone_out_indices=self.backbone_out_indices, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, is_decoder=False, initializer_range=self.initializer_range, is_hybrid=self.is_hybrid, backbone_config=backbone_config, backbone_featmap_shape=self.backbone_featmap_shape)
    def create_and_check_model(self, config, pixel_values, labels):
        model = DPTModel(config=config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_depth_estimation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForDepthEstimation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values)
        self.parent.assertEqual(result.predicted_depth.shape, (self.batch_size, self.image_size, self.image_size))

    def create_and_check_for_semantic_segmentation(self, config, pixel_values, labels):
        config.num_labels = self.num_labels
        model = DPTForSemanticSegmentation(config)
        model.to(torch_device)
        model.eval()
        result = model(pixel_values, labels=labels)
        self.parent.assertEqual(
            result.logits.shape, (self.batch_size, self.num_labels, self.image_size, self.image_size))

    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels = config_and_inputs
        inputs_dict = {"pixel_values": pixel_values}
        return config, inputs_dict
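# The tester above follows the standard HF ModelTester pattern: a tiny random config,
# random pixel_values, and one create_and_check_* helper per task head.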
@require_torch
class DPTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
    pipeline_model_mapping = (
        {
            "depth-estimation": DPTForDepthEstimation,
            "feature-extraction": DPTModel,
            "image-segmentation": DPTForSemanticSegmentation,
        }
        if is_torch_available()
        else {}
    )

    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False

    def setUp(self):
        self.model_tester = DPTModelTester(self)
        self.config_tester = ConfigTester(self, config_class=DPTConfig, has_text_modality=False, hidden_size=37)

    def test_config(self):
        self.config_tester.run_common_tests()

    @unittest.skip(reason="DPT does not use inputs_embeds")
    def test_inputs_embeds(self):
        pass

    def test_model_common_attributes(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            self.assertIsInstance(model.get_input_embeddings(), (nn.Module))
            x = model.get_output_embeddings()
            self.assertTrue(x is None or isinstance(x, nn.Linear))

    def test_forward_signature(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()

        for model_class in self.all_model_classes:
            model = model_class(config)
            signature = inspect.signature(model.forward)
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ["pixel_values"]
            self.assertListEqual(arg_names[:1], expected_arg_names)

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_depth_estimation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_depth_estimation(*config_and_inputs)

    def test_for_semantic_segmentation(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs)
    def test_training(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING):
                continue

            model = model_class(config)
            model.to(torch_device)
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_training_gradient_checkpointing(self):
        for model_class in self.all_model_classes:
            if model_class.__name__ == "DPTForDepthEstimation":
                continue

            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
            config.use_cache = False
            config.return_dict = True

            if model_class in get_values(MODEL_MAPPING) or not model_class.supports_gradient_checkpointing:
                continue
            model = model_class(config)
            model.to(torch_device)
            model.gradient_checkpointing_enable()
            model.train()
            inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
            loss = model(**inputs).loss
            loss.backward()

    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

        configs_no_init = _config_zero_init(config)
        for model_class in self.all_model_classes:
            model = model_class(config=configs_no_init)
            # Skip the check for the backbone
            backbone_params = []
            for name, module in model.named_modules():
                if module.__class__.__name__ == "DPTViTHybridEmbeddings":
                    backbone_params = [f"{name}.{key}" for key in module.state_dict().keys()]
                    break

            for name, param in model.named_parameters():
                if param.requires_grad:
                    if name in backbone_params:
                        continue
                    self.assertIn(
                        ((param.data.mean() * 1e9).round() / 1e9).item(), [0.0, 1.0], msg=f"Parameter {name} of model {model_class} seems not properly initialized")

    @unittest.skip("Will be fixed soon by reducing the size of the model used for common tests.")
    def test_model_is_small(self):
        pass

    @slow
    def test_model_from_pretrained(self):
        for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
            model = DPTModel.from_pretrained(model_name)
            self.assertIsNotNone(model)

    def test_raise_readout_type(self):
        config, _ = self.model_tester.prepare_config_and_inputs_for_common()
        config.readout_type = "add"
        with self.assertRaises(ValueError):
            _ = DPTForDepthEstimation(config)
def prepare_img():
    image = Image.open("./tests/fixtures/tests_samples/COCO/000000039769.png")
    return image


@require_torch
@require_vision
@slow
class DPTModelIntegrationTest(unittest.TestCase):
    def test_inference_depth_estimation(self):
        image_processor = DPTImageProcessor.from_pretrained("Intel/dpt-hybrid-midas")
        model = DPTForDepthEstimation.from_pretrained("Intel/dpt-hybrid-midas").to(torch_device)

        image = prepare_img()
        inputs = image_processor(images=image, return_tensors="pt").to(torch_device)

        # forward pass
        with torch.no_grad():
            outputs = model(**inputs)
            predicted_depth = outputs.predicted_depth

        # verify the predicted depth
        expected_shape = torch.Size((1, 384, 384))
        self.assertEqual(predicted_depth.shape, expected_shape)

        expected_slice = torch.tensor(
            [[[5.6437, 5.6146, 5.6511], [5.4371, 5.5649, 5.5958], [5.5215, 5.5184, 5.5293]]]).to(torch_device)

        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 100, expected_slice, atol=1e-4)) | 19 | 0 |
"""simple docstring"""
import json
import os
from typing import Optional, Tuple
import regex as re
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
logger = logging.get_logger(__name__)

VOCAB_FILES_NAMES = {
    '''vocab_file''': '''vocab.json''',
    '''merges_file''': '''merges.txt''',
}

PRETRAINED_VOCAB_FILES_MAP = {
    '''vocab_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-vocab.json'''},
    '''merges_file''': {'''ctrl''': '''https://raw.githubusercontent.com/salesforce/ctrl/master/ctrl-merges.txt'''},
}

PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
    '''ctrl''': 2_5_6,
}

CONTROL_CODES = {
'''Pregnancy''': 1_6_8_6_2_9,
'''Christianity''': 7_6_7_5,
'''Explain''': 1_0_6_4_2_3,
'''Fitness''': 6_3_4_4_0,
'''Saving''': 6_3_1_6_3,
'''Ask''': 2_7_1_7_1,
'''Ass''': 9_5_9_8_5,
'''Joke''': 1_6_3_5_0_9,
'''Questions''': 4_5_6_2_2,
'''Thoughts''': 4_9_6_0_5,
'''Retail''': 5_2_3_4_2,
'''Feminism''': 1_6_4_3_3_8,
'''Writing''': 1_1_9_9_2,
'''Atheism''': 1_9_2_2_6_3,
'''Netflix''': 4_8_6_1_6,
'''Computing''': 3_9_6_3_9,
'''Opinion''': 4_3_2_1_3,
'''Alone''': 4_4_9_6_7,
'''Funny''': 5_8_9_1_7,
'''Gaming''': 4_0_3_5_8,
'''Human''': 4_0_8_8,
'''India''': 1_3_3_1,
'''Joker''': 7_7_1_3_8,
'''Diet''': 3_6_2_0_6,
'''Legal''': 1_1_8_5_9,
'''Norman''': 4_9_3_9,
'''Tip''': 7_2_6_8_9,
'''Weight''': 5_2_3_4_3,
'''Movies''': 4_6_2_7_3,
'''Running''': 2_3_4_2_5,
'''Science''': 2_0_9_0,
'''Horror''': 3_7_7_9_3,
'''Confession''': 6_0_5_7_2,
'''Finance''': 1_2_2_5_0,
'''Politics''': 1_6_3_6_0,
'''Scary''': 1_9_1_9_8_5,
'''Support''': 1_2_6_5_4,
'''Technologies''': 3_2_5_1_6,
'''Teenage''': 6_6_1_6_0,
'''Event''': 3_2_7_6_9,
'''Learned''': 6_7_4_6_0,
'''Notion''': 1_8_2_7_7_0,
'''Wikipedia''': 3_7_5_8_3,
'''Books''': 6_6_6_5,
'''Extract''': 7_6_0_5_0,
'''Confessions''': 1_0_2_7_0_1,
'''Conspiracy''': 7_5_9_3_2,
'''Links''': 6_3_6_7_4,
'''Narcissus''': 1_5_0_4_2_5,
'''Relationship''': 5_4_7_6_6,
'''Relationships''': 1_3_4_7_9_6,
'''Reviews''': 4_1_6_7_1,
'''News''': 4_2_5_6,
'''Translation''': 2_6_8_2_0,
'''multilingual''': 1_2_8_4_0_6,
}
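# These control codes come from the CTRL paper (Keskar et al., 2019): prepending one
# of them to a prompt conditions the model's generation on that domain or task.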
def get_pairs(word):
    """Return the set of adjacent symbol pairs in a word (given as a tuple of symbols)."""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char))
        prev_char = char

    pairs = set(pairs)
    return pairs


class CTRLTokenizer(PreTrainedTokenizer):
    vocab_files_names = VOCAB_FILES_NAMES
    pretrained_vocab_files_map = PRETRAINED_VOCAB_FILES_MAP
    max_model_input_sizes = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
    control_codes = CONTROL_CODES

    def __init__(self, vocab_file, merges_file, unk_token="<unk>", **kwargs):
        super().__init__(unk_token=unk_token, **kwargs)

        with open(vocab_file, encoding="utf-8") as vocab_handle:
            self.encoder = json.load(vocab_handle)
        self.decoder = {v: k for k, v in self.encoder.items()}
        with open(merges_file, encoding="utf-8") as merges_handle:
            merges = merges_handle.read().split("\n")[1:-1]
        merges = [tuple(merge.split()) for merge in merges]
        self.bpe_ranks = dict(zip(merges, range(len(merges))))
        self.cache = {}
    @property
    def vocab_size(self):
        return len(self.encoder)

    def get_vocab(self):
        return dict(self.encoder, **self.added_tokens_encoder)

    def bpe(self, token):
        if token in self.cache:
            return self.cache[token]
        word = tuple(token)
        word = tuple(list(word[:-1]) + [word[-1] + "</w>"])
        pairs = get_pairs(word)

        if not pairs:
            return token

        while True:
            # pick the merge with the lowest rank (i.e. learned earliest)
            bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf")))
            if bigram not in self.bpe_ranks:
                break
            first, second = bigram
            new_word = []
            i = 0
            while i < len(word):
                try:
                    j = word.index(first, i)
                except ValueError:
                    new_word.extend(word[i:])
                    break
                else:
                    new_word.extend(word[i:j])
                    i = j

                if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                    new_word.append(first + second)
                    i += 2
                else:
                    new_word.append(word[i])
                    i += 1
            new_word = tuple(new_word)
            word = new_word
            if len(word) == 1:
                break
            else:
                pairs = get_pairs(word)
        word = "@@ ".join(word)
        word = word[:-4]
        self.cache[token] = word
        return word
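    # BPE recap: `bpe_ranks` maps each learned merge to its priority, so the loop in
    # `bpe()` always applies the earliest-learned merge still present in the word;
    # "@@ " marks sub-word continuations and the trailing "</w>" end-of-word marker
    # is stripped via `word[:-4]`.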
    def _tokenize(self, text):
        split_tokens = []

        words = re.findall(r"\S+\n?", text)

        for token in words:
            split_tokens.extend(list(self.bpe(token).split(" ")))
        return split_tokens

    def _convert_token_to_id(self, token):
        return self.encoder.get(token, self.encoder.get(self.unk_token))

    def _convert_id_to_token(self, index):
        return self.decoder.get(index, self.unk_token)

    def convert_tokens_to_string(self, tokens):
        out_string = " ".join(tokens).replace("@@ ", "").strip()
        return out_string

    def save_vocabulary(self, save_directory: str, filename_prefix: Optional[str] = None) -> Tuple[str]:
        if not os.path.isdir(save_directory):
            logger.error(f"Vocabulary path ({save_directory}) should be a directory")
            return
        vocab_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["vocab_file"])
        merge_file = os.path.join(
            save_directory, (filename_prefix + "-" if filename_prefix else "") + VOCAB_FILES_NAMES["merges_file"])

        with open(vocab_file, "w", encoding="utf-8") as f:
            f.write(json.dumps(self.encoder, indent=2, sort_keys=True, ensure_ascii=False) + "\n")

        index = 0
        with open(merge_file, "w", encoding="utf-8") as writer:
            writer.write("#version: 0.2\n")
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items(), key=lambda kv: kv[1]):
                if index != token_index:
                    logger.warning(
                        f"Saving vocabulary to {merge_file}: BPE merge indices are not consecutive."
                        " Please check that the tokenizer is not corrupted!")
                    index = token_index
                writer.write(" ".join(bpe_tokens) + "\n")
                index += 1
        return vocab_file, merge_file
# def decode(self, token_ids, skip_special_tokens=False, clean_up_tokenization_spaces=True):
# filtered_tokens = ' '.join(self.convert_ids_to_tokens(token_ids, skip_special_tokens=skip_special_tokens))
# tokens_generated_so_far = re.sub('(@@ )', '', string=filtered_tokens)
# tokens_generated_so_far = re.sub('(@@ ?$)', '', string=tokens_generated_so_far)
# return ''.join(tokens_generated_so_far) | 709 |
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
logger = logging.get_logger(__name__)


class ReturnType(enum.Enum):
    TENSORS = 0
    TEXT = 1


@add_end_docstrings(PIPELINE_INIT_ARGS)
class Text2TextGenerationPipeline(Pipeline):
    # Used in the return key of the pipeline.
    return_name = "generated"

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)

        self.check_model_type(
            TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING)

    def _sanitize_parameters(self, return_tensors=None, return_text=None, return_type=None, clean_up_tokenization_spaces=None, truncation=None, stop_sequence=None, **generate_kwargs):
        preprocess_params = {}
        if truncation is not None:
            preprocess_params["truncation"] = truncation

        forward_params = generate_kwargs

        postprocess_params = {}
        if return_tensors is not None and return_type is None:
            return_type = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
        if return_type is not None:
            postprocess_params["return_type"] = return_type

        if clean_up_tokenization_spaces is not None:
            postprocess_params["clean_up_tokenization_spaces"] = clean_up_tokenization_spaces

        if stop_sequence is not None:
            stop_sequence_ids = self.tokenizer.encode(stop_sequence, add_special_tokens=False)
            if len(stop_sequence_ids) > 1:
                warnings.warn(
                    "Stopping on a multiple token sequence is not yet supported on transformers. The first token of"
                    " the stop sequence will be used as the stop sequence string in the interim.")
            generate_kwargs["eos_token_id"] = stop_sequence_ids[0]

        return preprocess_params, forward_params, postprocess_params

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        """Checks whether there might be something wrong with given input with regard to the model."""
        return True
    def _parse_and_tokenize(self, *args, truncation):
        prefix = self.model.config.prefix if self.model.config.prefix is not None else ""
        if isinstance(args[0], list):
            if self.tokenizer.pad_token_id is None:
                raise ValueError("Please make sure that the tokenizer has a pad_token_id when using a batch input")
            args = ([prefix + arg for arg in args[0]],)
            padding = True

        elif isinstance(args[0], str):
            args = (prefix + args[0],)
            padding = False
        else:
            raise ValueError(
                f""" `args[0]`: {args[0]} have the wrong format. The should be either of type `str` or type `list`""")
        inputs = self.tokenizer(*args, padding=padding, truncation=truncation, return_tensors=self.framework)
        # This is produced by tokenizers but is an invalid generate kwargs
        if "token_type_ids" in inputs:
            del inputs["token_type_ids"]
        return inputs

    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if (
            isinstance(args[0], list)
            and all(isinstance(el, str) for el in args[0])
            and all(len(res) == 1 for res in result)
        ):
            return [res[0] for res in result]
        return result

    def preprocess(self, inputs, truncation=TruncationStrategy.DO_NOT_TRUNCATE, **kwargs):
        inputs = self._parse_and_tokenize(inputs, truncation=truncation, **kwargs)
        return inputs

    def _forward(self, model_inputs, **generate_kwargs):
        if self.framework == "pt":
            in_b, input_length = model_inputs["input_ids"].shape
        elif self.framework == "tf":
            in_b, input_length = tf.shape(model_inputs["input_ids"]).numpy()

        # set defaults so check_inputs always sees both bounds
        generate_kwargs["min_length"] = generate_kwargs.get("min_length", self.model.config.min_length)
        generate_kwargs["max_length"] = generate_kwargs.get("max_length", self.model.config.max_length)
        self.check_inputs(input_length, generate_kwargs["min_length"], generate_kwargs["max_length"])
        output_ids = self.model.generate(**model_inputs, **generate_kwargs)
        out_b = output_ids.shape[0]
        if self.framework == "pt":
            output_ids = output_ids.reshape(in_b, out_b // in_b, *output_ids.shape[1:])
        elif self.framework == "tf":
            output_ids = tf.reshape(output_ids, (in_b, out_b // in_b, *output_ids.shape[1:]))
        return {"output_ids": output_ids}

    def postprocess(self, model_outputs, return_type=ReturnType.TEXT, clean_up_tokenization_spaces=False):
        records = []
        for output_ids in model_outputs["output_ids"][0]:
            if return_type == ReturnType.TENSORS:
                record = {f"{self.return_name}_token_ids": output_ids}
            elif return_type == ReturnType.TEXT:
                record = {
                    f"{self.return_name}_text": self.tokenizer.decode(
                        output_ids,
                        skip_special_tokens=True,
                        clean_up_tokenization_spaces=clean_up_tokenization_spaces,
                    )
                }
            records.append(record)
        return records
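    # Illustrative use (output keys follow the `return_name` class attribute):
    #   pipe = pipeline("text2text-generation")
    #   pipe("translate English to German: hello")  # -> [{"generated_text": ...}]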
@add_end_docstrings(PIPELINE_INIT_ARGS)
class SummarizationPipeline(Text2TextGenerationPipeline):
    return_name = "summary"

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs)

    def check_inputs(self, input_length: int, min_length: int, max_length: int) -> bool:
        if max_length < min_length:
            logger.warning(f"Your min_length={min_length} must be inferior than your max_length={max_length}.")

        if input_length < max_length:
            logger.warning(
                f"Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is "
                "a summarization task, where outputs shorter than the input are typically wanted, you might "
                f"consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})")


@add_end_docstrings(PIPELINE_INIT_ARGS)
class TranslationPipeline(Text2TextGenerationPipeline):
    return_name = "translation"

    def check_inputs(self, input_length: int, min_length: int, max_length: int):
        if input_length > 0.9 * max_length:
            logger.warning(
                f"Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider "
                "increasing your max_length manually, e.g. translator('...', max_length=400)")
        return True

    def preprocess(self, *args, truncation=TruncationStrategy.DO_NOT_TRUNCATE, src_lang=None, tgt_lang=None):
        if getattr(self.tokenizer, "_build_translation_inputs", None):
            return self.tokenizer._build_translation_inputs(
                *args, return_tensors=self.framework, truncation=truncation, src_lang=src_lang, tgt_lang=tgt_lang)
        else:
            return super()._parse_and_tokenize(*args, truncation=truncation)

    def _sanitize_parameters(self, src_lang=None, tgt_lang=None, **kwargs):
        preprocess_params, forward_params, postprocess_params = super()._sanitize_parameters(**kwargs)
        if src_lang is not None:
            preprocess_params["src_lang"] = src_lang
        if tgt_lang is not None:
            preprocess_params["tgt_lang"] = tgt_lang
        if src_lang is None and tgt_lang is None:
            # Backward compatibility, direct arguments use is preferred.
            task = kwargs.get("task", self.task)
            items = task.split("_")
            if task and len(items) == 4:
                # translation, XX, to YY
                preprocess_params["src_lang"] = items[1]
                preprocess_params["tgt_lang"] = items[3]
        return preprocess_params, forward_params, postprocess_params

    def __call__(self, *args, **kwargs):
        return super().__call__(*args, **kwargs) | 19 | 0 |
"""simple docstring"""
import inspect
import os
import torch
from transformers import AutoModel
from transformers.testing_utils import mockenv_context
from transformers.trainer_utils import set_seed
import accelerate
from accelerate.accelerator import Accelerator
from accelerate.state import AcceleratorState
from accelerate.test_utils.testing import (
AccelerateTestCase,
TempDirTestCase,
execute_subprocess_async,
require_cuda,
require_fsdp,
require_multi_gpu,
slow,
)
from accelerate.utils.constants import (
FSDP_AUTO_WRAP_POLICY,
FSDP_BACKWARD_PREFETCH,
FSDP_SHARDING_STRATEGY,
FSDP_STATE_DICT_TYPE,
)
from accelerate.utils.dataclasses import FullyShardedDataParallelPlugin
from accelerate.utils.other import patch_environment
set_seed(4_2)
BERT_BASE_CASED = '''bert-base-cased'''
FP16 = '''fp16'''
BF16 = '''bf16'''
dtypes = [FP16, BF16]
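# These tests never launch a distributed job: mockenv_context fakes the environment
# variables that FullyShardedDataParallelPlugin reads at construction time, so each
# FSDP option can be verified in-process.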
@require_fsdp
@require_cuda
class FSDPPluginIntegration(AccelerateTestCase):
    def setUp(self):
        super().setUp()

        self.dist_env = dict(
            ACCELERATE_USE_FSDP="true",
            MASTER_ADDR="localhost",
            MASTER_PORT="10999",
            RANK="0",
            LOCAL_RANK="0",
            WORLD_SIZE="1",
        )

    def test_sharding_strategy(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import ShardingStrategy

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            env = self.dist_env.copy()
            # env key names below follow accelerate's FSDP plugin conventions
            env["FSDP_SHARDING_STRATEGY"] = f"{i + 1}"
            env["FSDP_SHARDING_STRATEGY_NAME"] = strategy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.sharding_strategy, ShardingStrategy(i + 1))
    def test_backward_prefetch(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import BackwardPrefetch

        for i, prefetch_policy in enumerate(FSDP_BACKWARD_PREFETCH):
            env = self.dist_env.copy()
            env["FSDP_BACKWARD_PREFETCH"] = prefetch_policy
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                if prefetch_policy == "NO_PREFETCH":
                    self.assertIsNone(fsdp_plugin.backward_prefetch)
                else:
                    self.assertEqual(fsdp_plugin.backward_prefetch, BackwardPrefetch(i + 1))

    def test_state_dict_type(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import StateDictType

        for i, state_dict_type in enumerate(FSDP_STATE_DICT_TYPE):
            env = self.dist_env.copy()
            env["FSDP_STATE_DICT_TYPE"] = state_dict_type
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.state_dict_type, StateDictType(i + 1))
                if state_dict_type == "FULL_STATE_DICT":
                    self.assertTrue(fsdp_plugin.state_dict_config.offload_to_cpu)
                    self.assertTrue(fsdp_plugin.state_dict_config.rank0_only)
    def test_auto_wrap_policy(self):
        model = AutoModel.from_pretrained(BERT_BASE_CASED)
        for policy in FSDP_AUTO_WRAP_POLICY:
            env = self.dist_env.copy()
            env["FSDP_AUTO_WRAP_POLICY"] = policy
            if policy == "TRANSFORMER_BASED_WRAP":
                env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "BertLayer"
            elif policy == "SIZE_BASED_WRAP":
                env["FSDP_MIN_NUM_PARAMS"] = "2000"
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                fsdp_plugin.set_auto_wrap_policy(model)
            if policy == "NO_WRAP":
                self.assertIsNone(fsdp_plugin.auto_wrap_policy)
            else:
                self.assertIsNotNone(fsdp_plugin.auto_wrap_policy)

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "TRANSFORMER_BASED_WRAP"
        env["FSDP_TRANSFORMER_CLS_TO_WRAP"] = "T5Layer"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            with self.assertRaises(Exception) as cm:
                fsdp_plugin.set_auto_wrap_policy(model)
            self.assertTrue("Could not find the transformer layer class to wrap in the model." in str(cm.exception))

        env = self.dist_env.copy()
        env["FSDP_AUTO_WRAP_POLICY"] = "SIZE_BASED_WRAP"
        env["FSDP_MIN_NUM_PARAMS"] = "0"
        with mockenv_context(**env):
            fsdp_plugin = FullyShardedDataParallelPlugin()
            fsdp_plugin.set_auto_wrap_policy(model)
            self.assertIsNone(fsdp_plugin.auto_wrap_policy)
    def test_mixed_precision(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import MixedPrecision
        from torch.distributed.fsdp.sharded_grad_scaler import ShardedGradScaler

        for mp_dtype in dtypes:
            env = self.dist_env.copy()
            env["ACCELERATE_MIXED_PRECISION"] = mp_dtype
            with mockenv_context(**env):
                accelerator = Accelerator()
                if mp_dtype == "fp16":
                    dtype = torch.float16
                elif mp_dtype == "bf16":
                    dtype = torch.bfloat16
                mp_policy = MixedPrecision(param_dtype=dtype, reduce_dtype=dtype, buffer_dtype=dtype)
                self.assertEqual(accelerator.state.fsdp_plugin.mixed_precision_policy, mp_policy)
                if mp_dtype == FP16:
                    self.assertTrue(isinstance(accelerator.scaler, ShardedGradScaler))
                elif mp_dtype == BF16:
                    self.assertIsNone(accelerator.scaler)
                AcceleratorState._reset_state(True)

    def test_cpu_offload(self):
        from torch.distributed.fsdp.fully_sharded_data_parallel import CPUOffload

        for flag in [True, False]:
            env = self.dist_env.copy()
            env["FSDP_OFFLOAD_PARAMS"] = str(flag).lower()
            with mockenv_context(**env):
                fsdp_plugin = FullyShardedDataParallelPlugin()
                self.assertEqual(fsdp_plugin.cpu_offload, CPUOffload(offload_params=flag))
@require_fsdp
@require_multi_gpu
@slow
class FSDPIntegrationTest(TempDirTestCase):
    def setUp(self):
        super().setUp()
        self.performance_lower_bound = 0.82
        self.performance_configs = [
            "fsdp_shard_grad_op_transformer_based_wrap",
            "fsdp_full_shard_transformer_based_wrap",
        ]
        self.peak_memory_usage_upper_bound = {
            "multi_gpu_fp16": 3200,
            "fsdp_shard_grad_op_transformer_based_wrap_fp16": 2000,
            "fsdp_full_shard_transformer_based_wrap_fp16": 1900,
            # Disabling below test as it overwhelms the RAM memory usage
            # on CI self-hosted runner leading to tests getting killed.
            # "fsdp_full_shard_cpu_offload_transformer_based_wrap_fp32": 1500,  # fp16 was leading to indefinite hang
        }
        self.n_train = 160
        self.n_val = 160

        mod_file = inspect.getfile(accelerate.test_utils)
        self.test_scripts_folder = os.path.sep.join(mod_file.split(os.path.sep)[:-1] + ["scripts", "external_deps"])

    def test_performance(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_performance.py")
        cmd = ["accelerate", "launch", "--num_processes=2", "--num_machines=1", "--machine_rank=0", "--use_fsdp"]
        for config in self.performance_configs:
            cmd_config = cmd.copy()
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
                if strategy.lower() in config:
                    cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
                    break
if "fp32" in config:
cmd_config.append("""--mixed_precision=no""" )
else:
cmd_config.append("""--mixed_precision=fp16""" )
if "cpu_offload" in config:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in config:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--performance_lower_bound={self.performance_lower_bound}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy())
    def test_checkpointing(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_checkpointing.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
            "--use_fsdp",
            "--mixed_precision=fp16",
            "--fsdp_transformer_layer_cls_to_wrap=BertLayer",
        ]

        for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
            cmd_config = cmd.copy()
            cmd_config.append(f"--fsdp_sharding_strategy={i+1}")
            if strategy != "FULL_SHARD":
                continue
            state_dict_config_index = len(cmd_config)
            for state_dict_type in FSDP_STATE_DICT_TYPE:
                cmd_config = cmd_config[:state_dict_config_index]
                cmd_config.append(f"--fsdp_state_dict_type={state_dict_type}")
                cmd_config.extend(
                    [
                        self.test_file_path,
                        f"--output_dir={self.tmpdir}",
                        "--partial_train_epoch=1",
                    ])
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

                cmd_config = cmd_config[:-1]
                resume_from_checkpoint = os.path.join(self.tmpdir, "epoch_0")
                cmd_config.extend(
                    [
                        f"--resume_from_checkpoint={resume_from_checkpoint}",
                    ])
                with patch_environment(omp_num_threads=1):
                    execute_subprocess_async(cmd_config, env=os.environ.copy())

    def test_peak_memory_usage(self):
        self.test_file_path = os.path.join(self.test_scripts_folder, "test_peak_memory_usage.py")
        cmd = [
            "accelerate",
            "launch",
            "--num_processes=2",
            "--num_machines=1",
            "--machine_rank=0",
        ]
        for spec, peak_mem_upper_bound in self.peak_memory_usage_upper_bound.items():
            cmd_config = cmd.copy()
if "fp16" in spec:
cmd_config.extend(["""--mixed_precision=fp16"""] )
else:
cmd_config.extend(["""--mixed_precision=no"""] )
if "multi_gpu" in spec:
continue
else:
cmd_config.extend(["""--use_fsdp"""] )
            for i, strategy in enumerate(FSDP_SHARDING_STRATEGY):
if strategy.lower() in spec:
cmd_config.append(f"""--fsdp_sharding_strategy={i+1}""" )
break
if "cpu_offload" in spec:
cmd_config.append("""--fsdp_offload_params=True""" )
for policy in FSDP_AUTO_WRAP_POLICY:
if policy.lower() in spec:
cmd_config.append(f"""--fsdp_auto_wrap_policy={policy}""" )
break
if policy == "TRANSFORMER_BASED_WRAP":
cmd_config.append("""--fsdp_transformer_layer_cls_to_wrap=BertLayer""" )
elif policy == "SIZE_BASED_WRAP":
cmd_config.append("""--fsdp_min_num_params=2000""" )
cmd_config.extend(
[
self.test_file_path,
f"""--output_dir={self.tmpdir}""",
f"""--peak_memory_upper_bound={peak_mem_upper_bound}""",
f"""--n_train={self.n_train}""",
f"""--n_val={self.n_val}""",
] )
with patch_environment(omp_num_threads=1 ):
                execute_subprocess_async(cmd_config, env=os.environ.copy()) | 710 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNet2DModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
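# enable_full_determinism() seeds torch/CUDA so the pixel-slice assertions below are
# reproducible across runs.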
class LDMPipelineFastTests(unittest.TestCase):
    @property
    def dummy_uncond_unet(self):
        torch.manual_seed(0)
        model = UNet2DModel(
            block_out_channels=(32, 64), layers_per_block=2, sample_size=32, in_channels=3, out_channels=3, down_block_types=("DownBlock2D", "AttnDownBlock2D"), up_block_types=("AttnUpBlock2D", "UpBlock2D"))
        return model

    @property
    def dummy_vq_model(self):
        torch.manual_seed(0)
        model = VQModel(
            block_out_channels=[32, 64], in_channels=3, out_channels=3, down_block_types=["DownEncoderBlock2D", "DownEncoderBlock2D"], up_block_types=["UpDecoderBlock2D", "UpDecoderBlock2D"], latent_channels=3)
        return model

    @property
    def dummy_text_encoder(self):
        torch.manual_seed(0)
        config = CLIPTextConfig(
            bos_token_id=0, eos_token_id=2, hidden_size=32, intermediate_size=37, layer_norm_eps=1e-05, num_attention_heads=4, num_hidden_layers=5, pad_token_id=1, vocab_size=1000)
        return CLIPTextModel(config)

    def test_inference_uncond(self):
        unet = self.dummy_uncond_unet
        scheduler = DDIMScheduler()
        vae = self.dummy_vq_model

        ldm = LDMPipeline(unet=unet, vqvae=vae, scheduler=scheduler)
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=2, output_type="numpy").images

        generator = torch.manual_seed(0)
        image_from_tuple = ldm(generator=generator, num_inference_steps=2, output_type="numpy", return_dict=False)[0]

        image_slice = image[0, -3:, -3:, -1]
        image_from_tuple_slice = image_from_tuple[0, -3:, -3:, -1]

        assert image.shape == (1, 64, 64, 3)
        expected_slice = np.array([0.8512, 0.818, 0.6411, 0.6808, 0.4465, 0.5618, 0.46, 0.6231, 0.5172])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2

        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance
        assert np.abs(image_from_tuple_slice.flatten() - expected_slice).max() < tolerance
@slow
@require_torch
class LDMPipelineIntegrationTests(unittest.TestCase):
    def test_inference_uncond(self):
        ldm = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
        ldm.to(torch_device)
        ldm.set_progress_bar_config(disable=None)

        generator = torch.manual_seed(0)
        image = ldm(generator=generator, num_inference_steps=5, output_type="numpy").images

        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (1, 256, 256, 3)
        expected_slice = np.array([0.4399, 0.44975, 0.46825, 0.474, 0.4359, 0.4581, 0.45095, 0.4341, 0.4447])
        tolerance = 1e-2 if torch_device != "mps" else 3e-2
        assert np.abs(image_slice.flatten() - expected_slice).max() < tolerance | 19 | 0 |
"""simple docstring"""
from packaging import version
from .. import __version__
from .constants import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, IMAGENET_STANDARD_MEAN, IMAGENET_STANDARD_STD
from .doc import (
add_code_sample_docstrings,
add_end_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
copy_func,
replace_return_docstrings,
)
from .generic import (
ContextManagers,
ExplicitEnum,
ModelOutput,
PaddingStrategy,
TensorType,
add_model_info_to_auto_map,
cached_property,
can_return_loss,
expand_dims,
find_labels,
flatten_dict,
infer_framework,
is_jax_tensor,
is_numpy_array,
is_tensor,
is_tf_symbolic_tensor,
is_tf_tensor,
is_torch_device,
is_torch_dtype,
is_torch_tensor,
reshape,
squeeze,
strtobool,
tensor_size,
to_numpy,
to_py_obj,
transpose,
working_or_temp_dir,
)
from .hub import (
CLOUDFRONT_DISTRIB_PREFIX,
DISABLE_TELEMETRY,
HF_MODULES_CACHE,
HUGGINGFACE_CO_PREFIX,
HUGGINGFACE_CO_RESOLVE_ENDPOINT,
PYTORCH_PRETRAINED_BERT_CACHE,
PYTORCH_TRANSFORMERS_CACHE,
S3_BUCKET_PREFIX,
TRANSFORMERS_CACHE,
TRANSFORMERS_DYNAMIC_MODULE_NAME,
EntryNotFoundError,
PushToHubMixin,
RepositoryNotFoundError,
RevisionNotFoundError,
cached_file,
default_cache_path,
define_sagemaker_information,
download_url,
extract_commit_hash,
get_cached_models,
get_file_from_repo,
get_full_repo_name,
has_file,
http_user_agent,
is_offline_mode,
is_remote_url,
move_cache,
send_example_telemetry,
try_to_load_from_cache,
)
from .import_utils import (
ENV_VARS_TRUE_AND_AUTO_VALUES,
ENV_VARS_TRUE_VALUES,
TORCH_FX_REQUIRED_VERSION,
USE_JAX,
USE_TF,
USE_TORCH,
DummyObject,
OptionalDependencyNotAvailable,
_LazyModule,
ccl_version,
direct_transformers_import,
get_torch_version,
is_accelerate_available,
is_apex_available,
is_bitsandbytes_available,
is_bsa_available,
is_coloredlogs_available,
is_cython_available,
is_datasets_available,
is_decord_available,
is_detectrona_available,
is_faiss_available,
is_flax_available,
is_ftfy_available,
is_in_notebook,
is_ipex_available,
is_jieba_available,
is_jumanpp_available,
is_kenlm_available,
is_keras_nlp_available,
is_librosa_available,
is_natten_available,
is_ninja_available,
is_onnx_available,
is_openai_available,
is_optimum_available,
is_pandas_available,
is_peft_available,
is_phonemizer_available,
is_protobuf_available,
is_psutil_available,
is_pyanvml_available,
is_pyctcdecode_available,
is_pytesseract_available,
is_pytest_available,
is_pytorch_quantization_available,
is_rjieba_available,
is_sacremoses_available,
is_safetensors_available,
is_sagemaker_dp_enabled,
is_sagemaker_mp_enabled,
is_scipy_available,
is_sentencepiece_available,
is_seqio_available,
is_sklearn_available,
is_soundfile_availble,
is_spacy_available,
is_speech_available,
is_sudachi_available,
is_tensorflow_probability_available,
is_tensorflow_text_available,
is_tfaonnx_available,
is_tf_available,
is_timm_available,
is_tokenizers_available,
is_torch_available,
is_torch_bfaa_available,
is_torch_bfaa_cpu_available,
is_torch_bfaa_gpu_available,
is_torch_compile_available,
is_torch_cuda_available,
is_torch_fx_available,
is_torch_fx_proxy,
is_torch_mps_available,
is_torch_neuroncore_available,
is_torch_tensorrt_fx_available,
is_torch_tfaa_available,
is_torch_tpu_available,
is_torchaudio_available,
is_torchdistx_available,
is_torchdynamo_available,
is_torchvision_available,
is_training_run_on_sagemaker,
is_vision_available,
requires_backends,
torch_only_method,
)
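# This module mostly re-exports helpers from the submodules imported above; the
# constants below define the canonical file names used throughout the library.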
a : str = "pytorch_model.bin"
a : Union[str, Any] = "pytorch_model.bin.index.json"
a : List[Any] = "adapter_config.json"
a : int = "adapter_model.bin"
a : Union[str, Any] = "adapter_model.safetensors"
a : str = "tf_model.h5"
a : List[Any] = "tf_model.h5.index.json"
a : Tuple = "model.ckpt"
a : List[Any] = "flax_model.msgpack"
a : Tuple = "flax_model.msgpack.index.json"
a : Optional[int] = "model.safetensors"
a : int = "model.safetensors.index.json"
a : int = "config.json"
a : int = "preprocessor_config.json"
a : Optional[int] = FEATURE_EXTRACTOR_NAME
a : int = "generation_config.json"
a : str = "modelcard.json"
a : List[str] = "▁"
a : Any = SENTENCEPIECE_UNDERLINE # Kept for backward compatibility
a : Any = [
[[0, 1, 0, 1], [1, 0, 0, 1]]
] * 2 # Needs to have 0s and 1s only since XLM uses it for langs too.
a : Union[str, Any] = [[7, 6, 0, 0, 1], [1, 2, 3, 0, 0], [0, 0, 0, 4, 5]]
a : Optional[int] = [[1, 1, 1, 1, 1], [1, 1, 1, 0, 0], [0, 0, 0, 1, 1]]
def check_min_version(min_version):
    if version.parse(__version__) < version.parse(min_version):
        if "dev" in min_version:
            error_message = (
                "This example requires a source install from HuggingFace Transformers (see "
                "`https://huggingface.co/docs/transformers/installation#install-from-source`),"
            )
        else:
            error_message = f"This example requires a minimum version of {min_version},"
        error_message += f" but the version found is {__version__}.\n"
        raise ImportError(
            error_message
            + "Check out https://github.com/huggingface/transformers/tree/main/examples#important-note for the examples corresponding to other "
            "versions of HuggingFace Transformers.") | 711 |
"""simple docstring"""
import re
from filelock import FileLock
try:
    import nltk

    NLTK_AVAILABLE = True
except (ImportError, ModuleNotFoundError):
    NLTK_AVAILABLE = False
if NLTK_AVAILABLE:
with FileLock('''.lock''') as lock:
nltk.download('''punkt''', quiet=True)
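# "punkt" is nltk's pretrained sentence splitter; the FileLock guards against
# concurrent downloads when several workers import this module at once.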
def add_newline_to_end_of_each_sentence(x: str) -> str:
    """Split text into sentences, one per line (Pegasus-style targets)."""
    x = re.sub("<n>", "", x)  # remove pegasus newline char
    assert NLTK_AVAILABLE, "nltk must be installed to separate newlines between sentences. (pip install nltk)"
    return "\n".join(nltk.sent_tokenize(x)) | 19 | 0 |
"""simple docstring"""
def largest_square_area_in_matrix_top_down(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Plain recursion: every sub-problem is recomputed (exponential time)."""

    def update_area_of_max_square(row: int, col: int) -> int:
        # BASE CASE
        if row >= rows or col >= cols:
            return 0

        right = update_area_of_max_square(row, col + 1)
        diagonal = update_area_of_max_square(row + 1, col + 1)
        down = update_area_of_max_square(row + 1, col)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    update_area_of_max_square(0, 0)
    return largest_square_area[0]


def largest_square_area_in_matrix_top_down_with_memoization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Same recursion with a dp_array cache: O(rows * cols) sub-problems."""

    def update_area_of_max_square_using_dp_array(row: int, col: int, dp_array: list[list[int]]) -> int:
        if row >= rows or col >= cols:
            return 0
        if dp_array[row][col] != -1:
            return dp_array[row][col]

        right = update_area_of_max_square_using_dp_array(row, col + 1, dp_array)
        diagonal = update_area_of_max_square_using_dp_array(row + 1, col + 1, dp_array)
        down = update_area_of_max_square_using_dp_array(row + 1, col, dp_array)

        if mat[row][col]:
            sub_problem_sol = 1 + min([right, diagonal, down])
            largest_square_area[0] = max(largest_square_area[0], sub_problem_sol)
            dp_array[row][col] = sub_problem_sol
            return sub_problem_sol
        else:
            return 0

    largest_square_area = [0]
    dp_array = [[-1] * cols for _ in range(rows)]
    update_area_of_max_square_using_dp_array(0, 0, dp_array)
    return largest_square_area[0]
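# The bottom-up variants below replace recursion with an explicit table, avoiding
# Python's recursion limit on large matrices while keeping O(rows * cols) time.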
def largest_square_area_in_matrix_bottom_up(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Iterative DP over a (rows+1) x (cols+1) table."""
    dp_array = [[0] * (cols + 1) for _ in range(rows + 1)]
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = dp_array[row][col + 1]
            diagonal = dp_array[row + 1][col + 1]
            bottom = dp_array[row + 1][col]

            if mat[row][col] == 1:
                dp_array[row][col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(dp_array[row][col], largest_square_area)
            else:
                dp_array[row][col] = 0

    return largest_square_area


def largest_square_area_in_matrix_bottom_up_space_optimization(rows: int, cols: int, mat: list[list[int]]) -> int:
    """Keep only two rows of the DP table: O(cols) extra space."""
    current_row = [0] * (cols + 1)
    next_row = [0] * (cols + 1)
    largest_square_area = 0
    for row in range(rows - 1, -1, -1):
        for col in range(cols - 1, -1, -1):
            right = current_row[col + 1]
            diagonal = next_row[col + 1]
            bottom = next_row[col]

            if mat[row][col] == 1:
                current_row[col] = 1 + min(right, diagonal, bottom)
                largest_square_area = max(current_row[col], largest_square_area)
            else:
                current_row[col] = 0
        next_row = current_row
        current_row = [0] * (cols + 1)  # fresh row to avoid aliasing next_row

    return largest_square_area


if __name__ == "__main__":
    import doctest

    doctest.testmod()
    print(largest_square_area_in_matrix_bottom_up(2, 2, [[1, 1], [1, 1]])) | 712 |
"""simple docstring"""
import unittest
from transformers import PegasusConfig, PegasusTokenizer, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor
if is_flax_available():
import os
# The slow tests are often failing with OOM error on GPU
# This makes JAX allocate exactly what is needed on demand, and deallocate memory that is no longer needed
# but will be slower as stated here https://jax.readthedocs.io/en/latest/gpu_memory_allocation.html
    os.environ['''XLA_PYTHON_CLIENT_ALLOCATOR'''] = '''platform'''
import jax
import jax.numpy as jnp
import numpy as np
from transformers import FlaxPegasusForConditionalGeneration, FlaxPegasusModel
@require_flax
class FlaxPegasusModelTester:
    config_cls = PegasusConfig
    config_updates = {}
    hidden_act = "gelu"

    def __init__(self, parent, batch_size=13, seq_length=7, is_training=True, use_labels=False, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37, hidden_dropout_prob=0.1, attention_probs_dropout_prob=0.1, max_position_embeddings=20, eos_token_id=2, pad_token_id=1, bos_token_id=0):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.eos_token_id = eos_token_id
        self.pad_token_id = pad_token_id
        self.bos_token_id = bos_token_id
    def prepare_config_and_inputs_for_common(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length - 1], self.vocab_size).clip(3, self.vocab_size)
        eos_tensor = np.expand_dims(np.array([self.eos_token_id] * self.batch_size), 1)
        input_ids = np.concatenate([input_ids, eos_tensor], axis=1)

        decoder_input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        config = self.config_cls(
            vocab_size=self.vocab_size, d_model=self.hidden_size, encoder_layers=self.num_hidden_layers, decoder_layers=self.num_hidden_layers, encoder_attention_heads=self.num_attention_heads, decoder_attention_heads=self.num_attention_heads, encoder_ffn_dim=self.intermediate_size, decoder_ffn_dim=self.intermediate_size, dropout=self.hidden_dropout_prob, attention_dropout=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, eos_token_ids=[2], bos_token_id=self.bos_token_id, pad_token_id=self.pad_token_id, decoder_start_token_id=self.pad_token_id, **self.config_updates)
        inputs_dict = prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids)
        return config, inputs_dict
    def check_use_cache_forward(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_attention_mask = jnp.ones((decoder_input_ids.shape[0], max_decoder_length), dtype="i4")

        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1))
        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)

        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, decoder_attention_mask=decoder_attention_mask, past_key_values=outputs_cache.past_key_values, decoder_position_ids=decoder_position_ids)

        outputs = model.decode(decoder_input_ids, encoder_outputs)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")

    def check_use_cache_forward_with_attn_mask(self, model_class_name, config, inputs_dict):
        max_decoder_length = 20
        model = model_class_name(config)

        encoder_outputs = model.encode(inputs_dict["input_ids"])

        decoder_input_ids, decoder_attention_mask = (
            inputs_dict["decoder_input_ids"],
            inputs_dict["decoder_attention_mask"],
        )

        decoder_attention_mask_cache = jnp.concatenate(
            [
                decoder_attention_mask,
                jnp.zeros((decoder_attention_mask.shape[0], max_decoder_length - decoder_attention_mask.shape[1])),
            ], axis=-1)

        past_key_values = model.init_cache(decoder_input_ids.shape[0], max_decoder_length, encoder_outputs)
        decoder_position_ids = jnp.broadcast_to(
            jnp.arange(decoder_input_ids.shape[-1] - 1)[None, :], (decoder_input_ids.shape[0], decoder_input_ids.shape[-1] - 1))

        outputs_cache = model.decode(
            decoder_input_ids[:, :-1], encoder_outputs, decoder_attention_mask=decoder_attention_mask_cache, past_key_values=past_key_values, decoder_position_ids=decoder_position_ids)
        decoder_position_ids = jnp.array(decoder_input_ids.shape[0] * [[decoder_input_ids.shape[-1] - 1]], dtype="i4")
        outputs_cache_next = model.decode(
            decoder_input_ids[:, -1:], encoder_outputs, past_key_values=outputs_cache.past_key_values, decoder_attention_mask=decoder_attention_mask_cache, decoder_position_ids=decoder_position_ids)

        outputs = model.decode(decoder_input_ids, encoder_outputs, decoder_attention_mask=decoder_attention_mask)

        diff = np.max(np.abs(outputs_cache_next[0][:, -1, :5] - outputs[0][:, -1, :5]))
        self.parent.assertTrue(diff < 1e-3, msg=f"Max diff is {diff}")
def prepare_pegasus_inputs_dict(config, input_ids, decoder_input_ids, attention_mask=None, decoder_attention_mask=None):
    if attention_mask is None:
        attention_mask = np.not_equal(input_ids, config.pad_token_id).astype(np.int8)
    if decoder_attention_mask is None:
        decoder_attention_mask = np.concatenate(
            [
                np.ones(decoder_input_ids[:, :1].shape, dtype=np.int8),
                np.not_equal(decoder_input_ids[:, 1:], config.pad_token_id).astype(np.int8),
            ], axis=-1)
    return {
        "input_ids": input_ids,
        "decoder_input_ids": decoder_input_ids,
        "attention_mask": attention_mask,
        "decoder_attention_mask": decoder_attention_mask,
    }
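# The check_use_cache_forward helpers above assert that incremental decoding with
# init_cache/past_key_values matches a full decode to within 1e-3.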
@require_flax
class FlaxPegasusModelTest(FlaxModelTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            FlaxPegasusForConditionalGeneration,
            FlaxPegasusModel,
        )
        if is_flax_available()
        else ()
    )
    all_generative_model_classes = (FlaxPegasusForConditionalGeneration,) if is_flax_available() else ()
    is_encoder_decoder = True
    test_pruning = False
    test_head_masking = False
    test_onnx = False
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase )
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
self.config_tester.run_common_tests()
def _snake_case ( self : Optional[int] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
self.model_tester.check_use_cache_forward_with_attn_mask(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
def _snake_case ( self : Dict ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model_class(__UpperCamelCase )
@jax.jit
def encode_jitted(__UpperCamelCase : List[Any] , __UpperCamelCase : str=None , **__UpperCamelCase : int ):
return model.encode(input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = encode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
with self.subTest(model_class.__name__ ):
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = model.encode(inputs_dict["""input_ids"""] , inputs_dict["""attention_mask"""] )
_UpperCAmelCase = {
"""decoder_input_ids""": inputs_dict["""decoder_input_ids"""],
"""decoder_attention_mask""": inputs_dict["""decoder_attention_mask"""],
"""encoder_outputs""": encoder_outputs,
}
@jax.jit
def decode_jitted(__UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Tuple ):
return model.decode(
decoder_input_ids=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , encoder_outputs=__UpperCamelCase , )
with self.subTest("""JIT Enabled""" ):
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
with self.subTest("""JIT Disabled""" ):
with jax.disable_jit():
_UpperCAmelCase = decode_jitted(**__UpperCamelCase ).to_tuple()
self.assertEqual(len(__UpperCamelCase ) , len(__UpperCamelCase ) )
for jitted_output, output in zip(__UpperCamelCase , __UpperCamelCase ):
self.assertEqual(jitted_output.shape , output.shape )
@slow
def _snake_case ( self : int ) ->int:
'''simple docstring'''
for model_class_name in self.all_model_classes:
_UpperCAmelCase = model_class_name.from_pretrained("""google/pegasus-large""" , from_pt=__UpperCamelCase )
_UpperCAmelCase = np.ones((1, 1) )
_UpperCAmelCase = model(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
@slow
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
_UpperCAmelCase = [
""" PG&E stated it scheduled the blackouts in response to forecasts for high winds amid dry conditions. The aim is to reduce the risk of wildfires. Nearly 800 thousand customers were scheduled to be affected by the shutoffs which were expected to last through at least midday tomorrow.""",
""" The London trio are up for best UK act and best album, as well as getting two nominations in the best song category.\"We got told like this morning 'Oh I think you're nominated'\", said Dappy.\"And I was like 'Oh yeah, which one?' And now we've got nominated for four awards. I mean, wow!\"Bandmate Fazer added: \"We thought it's best of us to come down and mingle with everyone and say hello to the cameras. And now we find we've got four nominations.\"The band have two shots at the best song prize, getting the nod for their Tynchy Stryder collaboration Number One, and single Strong Again.Their album Uncle B will also go up against records by the likes of Beyonce and Kanye West.N-Dubz picked up the best newcomer Mobo in 2007, but female member Tulisa said they wouldn't be too disappointed if they didn't win this time around.\"At the end of the day we're grateful to be where we are in our careers.\"If it don't happen then it don't happen - live to fight another day and keep on making albums and hits for the fans.\"Dappy also revealed they could be performing live several times on the night.The group will be doing Number One and also a possible rendition of the War Child single, I Got Soul.The charity song is a re-working of The Killers' All These Things That I've Done and is set to feature artists like Chipmunk, Ironik and Pixie Lott.This year's Mobos will be held outside of London for the first time, in Glasgow on 30 September.N-Dubz said they were looking forward to performing for their Scottish fans and boasted about their recent shows north of the border.\"We just done Edinburgh the other day,\" said Dappy.\"We smashed up an N-Dubz show over there. We done Aberdeen about three or four months ago - we smashed up that show over there! Everywhere we go we smash it up!\" """,
]
_UpperCAmelCase = [
"""California's largest electricity provider has turned off power to hundreds of thousands of customers.""",
"""Pop group N-Dubz have revealed they were surprised to get four nominations for this year's Mobo Awards.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""np""" , truncation=__UpperCamelCase , max_length=5_12 , padding=__UpperCamelCase )
_UpperCAmelCase = model.generate(**__UpperCamelCase , num_beams=2 ).sequences
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase , skip_special_tokens=__UpperCamelCase )
        assert tgt_text == decoded
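# Hedged end-to-end sketch of the summarization path exercised above; it
# downloads the google/pegasus-xsum checkpoint, so it is illustrative rather
# than part of the test suite.
def _example_summarize(text):
    model = FlaxPegasusForConditionalGeneration.from_pretrained("""google/pegasus-xsum""" )
    tokenizer = PegasusTokenizer.from_pretrained("""google/pegasus-xsum""" )
    batch = tokenizer([text] , return_tensors="""np""" , truncation=True , max_length=5_12 , padding=True )
    summary_ids = model.generate(**batch , num_beams=2 ).sequences
    return tokenizer.batch_decode(summary_ids , skip_special_tokens=True )[0]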
"""simple docstring"""
import json
import logging
import math
import os
import sys
from dataclasses import dataclass, field
from typing import Optional
from datasets import Dataset, load_dataset
import transformers
from transformers import (
CONFIG_MAPPING,
MODEL_FOR_MASKED_LM_MAPPING,
AutoConfig,
AutoModelForMaskedLM,
AutoTokenizer,
DataCollatorForWholeWordMask,
HfArgumentParser,
Trainer,
TrainingArguments,
set_seed,
)
from transformers.trainer_utils import get_last_checkpoint, is_main_process
logger = logging.getLogger(__name__)
MODEL_CONFIG_CLASSES = list(MODEL_FOR_MASKED_LM_MAPPING.keys())
a : List[str] = tuple(conf.model_type for conf in MODEL_CONFIG_CLASSES)
@dataclass
class a_ :
a : Dict = field(
default=_a , metadata={
'help': (
            'The model checkpoint for weights initialization. Don\'t set if you want to train a model from scratch.'
)
} , )
a : int = field(
default=_a , metadata={'help': 'If training from scratch, pass a model type from the list: ' + ', '.join(_a )} , )
a : Optional[int] = field(
default=_a , metadata={
'help': (
'Override some existing default config settings when a model is trained from scratch. Example: '
'n_embd=10,resid_pdrop=0.2,scale_attn_weights=false,summary_type=cls_index'
)
} , )
a : Union[str, Any] = field(
default=_a , metadata={'help': 'Pretrained config name or path if not the same as model_name'} )
a : Tuple = field(
default=_a , metadata={'help': 'Pretrained tokenizer name or path if not the same as model_name'} )
a : Union[str, Any] = field(
default=_a , metadata={'help': 'Where do you want to store the pretrained models downloaded from huggingface.co'} , )
a : str = field(
default=_a , metadata={'help': 'Whether to use one of the fast tokenizer (backed by the tokenizers library) or not.'} , )
a : List[str] = field(
default='main' , metadata={'help': 'The specific model version to use (can be a branch name, tag name or commit id).'} , )
a : List[str] = field(
default=_a , metadata={
'help': (
'Will use the token generated when running `huggingface-cli login` (necessary to use this script '
'with private models).'
)
} , )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
if self.config_overrides is not None and (self.config_name is not None or self.model_name_or_path is not None):
raise ValueError(
"""--config_overrides can\'t be used in combination with --config_name or --model_name_or_path""" )
@dataclass
class a_ :
a : Optional[int] = field(
default=_a , metadata={'help': 'The name of the dataset to use (via the datasets library).'} )
a : str = field(
default=_a , metadata={'help': 'The configuration name of the dataset to use (via the datasets library).'} )
a : Tuple = field(default=_a , metadata={'help': 'The input training data file (a text file).'} )
a : Tuple = field(
default=_a , metadata={'help': 'An optional input evaluation data file to evaluate the perplexity on (a text file).'} , )
a : Dict = field(
default=_a , metadata={'help': 'An optional input train ref data file for whole word masking in Chinese.'} , )
a : List[Any] = field(
default=_a , metadata={'help': 'An optional input validation ref data file for whole word masking in Chinese.'} , )
a : Any = field(
default=_a , metadata={'help': 'Overwrite the cached training and evaluation sets'} )
a : Optional[Any] = field(
default=5 , metadata={
'help': 'The percentage of the train set used as validation set in case there\'s no validation split'
} , )
a : Optional[int] = field(
default=_a , metadata={
'help': (
'The maximum total input sequence length after tokenization. Sequences longer '
'than this will be truncated. Default to the max input length of the model.'
)
} , )
a : Tuple = field(
default=_a , metadata={'help': 'The number of processes to use for the preprocessing.'} , )
a : List[Any] = field(
default=0.15 , metadata={'help': 'Ratio of tokens to mask for masked language modeling loss'} )
a : List[str] = field(
default=_a , metadata={
'help': (
'Whether to pad all samples to `max_seq_length`. '
'If False, will pad the samples dynamically when batching to the maximum length in the batch.'
)
} , )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
if self.train_file is not None:
_UpperCAmelCase = self.train_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`train_file` should be a csv, a json or a txt file."
if self.validation_file is not None:
_UpperCAmelCase = self.validation_file.split(""".""" )[-1]
assert extension in ["csv", "json", "txt"], "`validation_file` should be a csv, a json or a txt file."
def add_chinese_references( dataset , ref_file ) -> List[Any]:
    """simple docstring"""
    with open(ref_file , """r""" , encoding="""utf-8""" ) as f:
        refs = [json.loads(line ) for line in f.read().splitlines() if (len(line ) > 0 and not line.isspace())]
    assert len(dataset ) == len(refs )
    dataset_dict = {c: dataset[c] for c in dataset.column_names}
    dataset_dict["""chinese_ref"""] = refs
    return Dataset.from_dict(dataset_dict )
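# Illustrative round trip of the helper above with a tiny in-memory dataset in
# place of real files (the per-example reference lists are made-up values):
def _example_add_refs():
    ds = Dataset.from_dict({"""input_ids""": [[1_01, 1_02], [1_01, 1_03]]} )
    cols = {c: ds[c] for c in ds.column_names}
    cols["""chinese_ref"""] = [[1], [1]]
    with_refs = Dataset.from_dict(cols )
    assert with_refs.column_names == ["""input_ids""", """chinese_ref"""]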
def main( ) -> Any:
"""simple docstring"""
_UpperCAmelCase = HfArgumentParser((ModelArguments, DataTrainingArguments, TrainingArguments) )
if len(sys.argv ) == 2 and sys.argv[1].endswith(""".json""" ):
# If we pass only one argument to the script and it's the path to a json file,
# let's parse it to get our arguments.
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = parser.parse_json_file(json_file=os.path.abspath(sys.argv[1] ) )
else:
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = parser.parse_args_into_dataclasses()
# Detecting last checkpoint.
_UpperCAmelCase = None
if os.path.isdir(training_args.output_dir ) and training_args.do_train and not training_args.overwrite_output_dir:
_UpperCAmelCase = get_last_checkpoint(training_args.output_dir )
if last_checkpoint is None and len(os.listdir(training_args.output_dir ) ) > 0:
raise ValueError(
F"""Output directory ({training_args.output_dir}) already exists and is not empty. """
"""Use --overwrite_output_dir to overcome.""" )
elif last_checkpoint is not None:
logger.info(
F"""Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change """
"""the `--output_dir` or add `--overwrite_output_dir` to train from scratch.""" )
# Setup logging
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , handlers=[logging.StreamHandler(sys.stdout )] , )
logger.setLevel(logging.INFO if is_main_process(training_args.local_rank ) else logging.WARN )
# Log on each process the small summary:
logger.warning(
F"""Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}"""
+ F"""distributed training: {bool(training_args.local_rank != -1 )}, 16-bits training: {training_args.fpaa}""" )
# Set the verbosity to info of the Transformers logger (on main process only):
if is_main_process(training_args.local_rank ):
transformers.utils.logging.set_verbosity_info()
transformers.utils.logging.enable_default_handler()
transformers.utils.logging.enable_explicit_format()
logger.info("""Training/evaluation parameters %s""" , UpperCAmelCase__ )
# Set seed before initializing model.
set_seed(training_args.seed )
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
#
    # In distributed training, the load_dataset function guarantees that only one local process can concurrently
# download the dataset.
if data_args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
_UpperCAmelCase = load_dataset(data_args.dataset_name , data_args.dataset_config_name )
if "validation" not in datasets.keys():
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[:{data_args.validation_split_percentage}%]""" , )
_UpperCAmelCase = load_dataset(
data_args.dataset_name , data_args.dataset_config_name , split=F"""train[{data_args.validation_split_percentage}%:]""" , )
else:
_UpperCAmelCase = {}
if data_args.train_file is not None:
_UpperCAmelCase = data_args.train_file
if data_args.validation_file is not None:
_UpperCAmelCase = data_args.validation_file
_UpperCAmelCase = data_args.train_file.split(""".""" )[-1]
if extension == "txt":
_UpperCAmelCase = """text"""
_UpperCAmelCase = load_dataset(UpperCAmelCase__ , data_files=UpperCAmelCase__ )
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Load pretrained model and tokenizer
#
# Distributed training:
# The .from_pretrained methods guarantee that only one local process can concurrently
# download model & vocab.
_UpperCAmelCase = {
"""cache_dir""": model_args.cache_dir,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.config_name:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.config_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoConfig.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
_UpperCAmelCase = CONFIG_MAPPING[model_args.model_type]()
logger.warning("""You are instantiating a new config instance from scratch.""" )
if model_args.config_overrides is not None:
logger.info(F"""Overriding config: {model_args.config_overrides}""" )
config.update_from_string(model_args.config_overrides )
logger.info(F"""New config: {config}""" )
_UpperCAmelCase = {
"""cache_dir""": model_args.cache_dir,
"""use_fast""": model_args.use_fast_tokenizer,
"""revision""": model_args.model_revision,
"""use_auth_token""": True if model_args.use_auth_token else None,
}
if model_args.tokenizer_name:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.tokenizer_name , **UpperCAmelCase__ )
elif model_args.model_name_or_path:
_UpperCAmelCase = AutoTokenizer.from_pretrained(model_args.model_name_or_path , **UpperCAmelCase__ )
else:
raise ValueError(
"""You are instantiating a new tokenizer from scratch. This is not supported by this script."""
"""You can do it from another script, save it, and load it from here, using --tokenizer_name.""" )
if model_args.model_name_or_path:
_UpperCAmelCase = AutoModelForMaskedLM.from_pretrained(
model_args.model_name_or_path , from_tf=bool(""".ckpt""" in model_args.model_name_or_path ) , config=UpperCAmelCase__ , cache_dir=model_args.cache_dir , revision=model_args.model_revision , use_auth_token=True if model_args.use_auth_token else None , )
else:
logger.info("""Training new model from scratch""" )
_UpperCAmelCase = AutoModelForMaskedLM.from_config(UpperCAmelCase__ )
model.resize_token_embeddings(len(UpperCAmelCase__ ) )
# Preprocessing the datasets.
# First we tokenize all the texts.
if training_args.do_train:
_UpperCAmelCase = datasets["""train"""].column_names
else:
_UpperCAmelCase = datasets["""validation"""].column_names
_UpperCAmelCase = """text""" if """text""" in column_names else column_names[0]
_UpperCAmelCase = """max_length""" if data_args.pad_to_max_length else False
def tokenize_function(_A ):
# Remove empty lines
        examples["""text"""] = [line for line in examples["""text"""] if len(line ) > 0 and not line.isspace()]
return tokenizer(examples["""text"""] , padding=UpperCAmelCase__ , truncation=UpperCAmelCase__ , max_length=data_args.max_seq_length )
_UpperCAmelCase = datasets.map(
UpperCAmelCase__ , batched=UpperCAmelCase__ , num_proc=data_args.preprocessing_num_workers , remove_columns=[text_column_name] , load_from_cache_file=not data_args.overwrite_cache , )
# Add the chinese references if provided
if data_args.train_ref_file is not None:
_UpperCAmelCase = add_chinese_references(tokenized_datasets["""train"""] , data_args.train_ref_file )
if data_args.validation_ref_file is not None:
_UpperCAmelCase = add_chinese_references(
tokenized_datasets["""validation"""] , data_args.validation_ref_file )
    # If we have ref files, we need to keep the Trainer from dropping those columns
_UpperCAmelCase = data_args.train_ref_file or data_args.validation_ref_file
if has_ref:
_UpperCAmelCase = False
# Data collator
# This one will take care of randomly masking the tokens.
_UpperCAmelCase = DataCollatorForWholeWordMask(tokenizer=UpperCAmelCase__ , mlm_probability=data_args.mlm_probability )
# Initialize our Trainer
_UpperCAmelCase = Trainer(
model=UpperCAmelCase__ , args=UpperCAmelCase__ , train_dataset=tokenized_datasets["""train"""] if training_args.do_train else None , eval_dataset=tokenized_datasets["""validation"""] if training_args.do_eval else None , tokenizer=UpperCAmelCase__ , data_collator=UpperCAmelCase__ , )
# Training
if training_args.do_train:
if last_checkpoint is not None:
_UpperCAmelCase = last_checkpoint
elif model_args.model_name_or_path is not None and os.path.isdir(model_args.model_name_or_path ):
_UpperCAmelCase = model_args.model_name_or_path
else:
_UpperCAmelCase = None
_UpperCAmelCase = trainer.train(resume_from_checkpoint=UpperCAmelCase__ )
trainer.save_model() # Saves the tokenizer too for easy upload
_UpperCAmelCase = os.path.join(training_args.output_dir , """train_results.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Train results *****""" )
for key, value in sorted(train_result.metrics.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
# Need to save the state, since Trainer.save_model saves only the tokenizer with the model
trainer.state.save_to_json(os.path.join(training_args.output_dir , """trainer_state.json""" ) )
# Evaluation
_UpperCAmelCase = {}
if training_args.do_eval:
logger.info("""*** Evaluate ***""" )
_UpperCAmelCase = trainer.evaluate()
_UpperCAmelCase = math.exp(eval_output["""eval_loss"""] )
_UpperCAmelCase = perplexity
_UpperCAmelCase = os.path.join(training_args.output_dir , """eval_results_mlm_wwm.txt""" )
if trainer.is_world_process_zero():
with open(UpperCAmelCase__ , """w""" ) as writer:
logger.info("""***** Eval results *****""" )
for key, value in sorted(results.items() ):
logger.info(F""" {key} = {value}""" )
writer.write(F"""{key} = {value}\n""" )
return results
def _mp_fn( index ) -> Any:
"""simple docstring"""
main()
if __name__ == "__main__":
    main()
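# Standalone sketch of the whole-word-mask collator wired up above; the
# checkpoint name is an arbitrary example and the batch is tiny, so this only
# illustrates the expected input/output shapes.
def _example_wwm_collator():
    tokenizer = AutoTokenizer.from_pretrained("""bert-base-uncased""" )
    collator = DataCollatorForWholeWordMask(tokenizer=tokenizer , mlm_probability=0.15 )
    batch = collator([tokenizer("""whole word masking""" ), tokenizer("""hello world""" )] )
    return batch["""input_ids"""].shape, batch["""labels"""].shape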
"""simple docstring"""
import tempfile
import unittest
from transformers import TaConfig, is_torch_available
from transformers.testing_utils import (
require_sentencepiece,
require_tokenizers,
require_torch,
slow,
torch_device,
)
from ...generation.test_utils import GenerationTesterMixin
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import AutoTokenizer, UMTaForConditionalGeneration, UMTaForQuestionAnswering, UMTaModel
class a_ :
def __init__( self : Tuple , __UpperCamelCase : Optional[int] , __UpperCamelCase : List[Any]=99 , __UpperCamelCase : Optional[int]=13 , __UpperCamelCase : List[str]=7 , __UpperCamelCase : List[Any]=9 , __UpperCamelCase : List[Any]=True , __UpperCamelCase : str=True , __UpperCamelCase : int=False , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=5 , __UpperCamelCase : Optional[int]=4 , __UpperCamelCase : Any=37 , __UpperCamelCase : List[Any]=8 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : str=0.0_0_2 , __UpperCamelCase : Union[str, Any]=1 , __UpperCamelCase : List[Any]=0 , __UpperCamelCase : Tuple=0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Any=None , ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = encoder_seq_length
_UpperCAmelCase = decoder_seq_length
# For common tests
_UpperCAmelCase = self.decoder_seq_length
_UpperCAmelCase = is_training
_UpperCAmelCase = use_attention_mask
_UpperCAmelCase = use_labels
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = d_ff
_UpperCAmelCase = relative_attention_num_buckets
_UpperCAmelCase = dropout_rate
_UpperCAmelCase = initializer_factor
_UpperCAmelCase = eos_token_id
_UpperCAmelCase = pad_token_id
_UpperCAmelCase = decoder_start_token_id
_UpperCAmelCase = None
_UpperCAmelCase = decoder_layers
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
return TaConfig.from_pretrained("""google/umt5-base""" )
def _snake_case ( self : List[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Tuple=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : str=None , ) ->int:
'''simple docstring'''
if attention_mask is None:
_UpperCAmelCase = input_ids.ne(config.pad_token_id )
if decoder_attention_mask is None:
_UpperCAmelCase = decoder_input_ids.ne(config.pad_token_id )
if head_mask is None:
_UpperCAmelCase = torch.ones(config.num_hidden_layers , config.num_attention_heads , device=__UpperCamelCase )
if decoder_head_mask is None:
_UpperCAmelCase = torch.ones(config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
if cross_attn_head_mask is None:
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_attention_heads , device=__UpperCamelCase )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
def _snake_case ( self : List[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = ids_tensor([self.batch_size, self.encoder_seq_length] , self.vocab_size )
_UpperCAmelCase = ids_tensor([self.batch_size, self.decoder_seq_length] , self.vocab_size )
# we need to clamp the input ids here to avoid having pad token in between
# this is because for NllbMoe the position_ids are prepared such that
# all pad tokens have pos id = 2 and rest are between 2..seq_length
# and the seq_length here is seq_length - num_pad_tokens
# but when using past, there is no way of knowing if the past input ids had
        # pad tokens in them, which results in an incorrect seq_length, which in turn results in
# position_ids being off by num_pad_tokens in past input
_UpperCAmelCase = input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = decoder_input_ids.clamp(self.pad_token_id + 1 )
_UpperCAmelCase = self.get_config()
_UpperCAmelCase = config.num_attention_heads
_UpperCAmelCase = self.prepare_inputs_dict(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase )
return config, input_dict
def _snake_case ( self : Union[str, Any] ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.prepare_config_and_inputs()
return config, inputs_dict
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
return TaConfig(
vocab_size=1_66 , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
return TaConfig(
vocab_size=self.vocab_size , d_model=self.hidden_size , d_ff=self.d_ff , d_kv=self.hidden_size // self.num_attention_heads , num_layers=self.num_hidden_layers , num_decoder_layers=self.decoder_layers , num_heads=self.num_attention_heads , relative_attention_num_buckets=self.relative_attention_num_buckets , dropout_rate=self.dropout_rate , initializer_factor=self.initializer_factor , eos_token_id=self.eos_token_id , bos_token_id=self.pad_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.decoder_start_token_id , )
def _snake_case ( self : List[Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : List[str] , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(
input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase , attention_mask=__UpperCamelCase , decoder_attention_mask=__UpperCamelCase , )
_UpperCAmelCase = model(input_ids=__UpperCamelCase , decoder_input_ids=__UpperCamelCase )
_UpperCAmelCase = result.last_hidden_state
_UpperCAmelCase = result.past_key_values
_UpperCAmelCase = result.encoder_last_hidden_state
self.parent.assertEqual(encoder_output.size() , (self.batch_size, self.encoder_seq_length, self.hidden_size) )
self.parent.assertEqual(decoder_output.size() , (self.batch_size, self.decoder_seq_length, self.hidden_size) )
# There should be `num_layers` key value embeddings stored in decoder_past
self.parent.assertEqual(len(__UpperCamelCase ) , config.num_layers )
# There should be a self attn key, a self attn value, a cross attn key and a cross attn value stored in each decoder_past tuple
self.parent.assertEqual(len(decoder_past[0] ) , 4 )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Dict , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[str] , ) ->str:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).get_decoder().to(__UpperCamelCase ).eval()
# first forward pass
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase )
_UpperCAmelCase = model(__UpperCamelCase , use_cache=__UpperCamelCase )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) )
self.parent.assertTrue(len(__UpperCamelCase ) == len(__UpperCamelCase ) + 1 )
_UpperCAmelCase ,_UpperCAmelCase = outputs.to_tuple()
        # create hypothetical next token and extend next_input_ids with it
_UpperCAmelCase = ids_tensor((self.batch_size, 1) , config.vocab_size )
# append to next input_ids and
_UpperCAmelCase = torch.cat([input_ids, next_tokens] , dim=-1 )
_UpperCAmelCase = model(__UpperCamelCase )["""last_hidden_state"""]
_UpperCAmelCase = model(__UpperCamelCase , past_key_values=__UpperCamelCase )["""last_hidden_state"""]
# select random slice
_UpperCAmelCase = ids_tensor((1,) , output_from_past.shape[-1] ).item()
_UpperCAmelCase = output_from_no_past[:, -1, random_slice_idx].detach()
_UpperCAmelCase = output_from_past[:, 0, random_slice_idx].detach()
# test that outputs are equal for slice
self.parent.assertTrue(torch.allclose(__UpperCamelCase , __UpperCamelCase , atol=1e-3 ) )
def _snake_case ( self : Optional[int] , __UpperCamelCase : int , __UpperCamelCase : Dict , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = UMTaModel(config=__UpperCamelCase ).to(__UpperCamelCase ).half().eval()
_UpperCAmelCase = model(**__UpperCamelCase )["""last_hidden_state"""]
self.parent.assertFalse(torch.isnan(__UpperCamelCase ).any().item() )
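# Minimal standalone illustration (no model involved) of the random-slice
# comparison used by the past-key-values checks above: the full forward pass
# must agree with the cached incremental pass up to small numerical noise.
def _example_slice_check():
    torch.manual_seed(0 )
    output_from_no_past = torch.randn(2 , 8 , 32 )
    output_from_past = output_from_no_past[:, -1:, :] + 1e-5
    random_slice_idx = ids_tensor((1,) , output_from_no_past.shape[-1] ).item()
    assert torch.allclose(
        output_from_no_past[:, -1, random_slice_idx] , output_from_past[:, 0, random_slice_idx] , atol=1e-3 )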
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : List[str] = (
(UMTaModel, UMTaForConditionalGeneration, UMTaForQuestionAnswering) if is_torch_available() else ()
)
a : Tuple = (UMTaForConditionalGeneration,) if is_torch_available() else ()
a : Optional[Any] = (
{
'conversational': UMTaForConditionalGeneration,
'feature-extraction': UMTaModel,
'summarization': UMTaForConditionalGeneration,
'text2text-generation': UMTaForConditionalGeneration,
'translation': UMTaForConditionalGeneration,
'question-answering': UMTaForQuestionAnswering,
}
if is_torch_available()
else {}
)
a : Any = True
a : Optional[int] = False
a : Any = False
a : Optional[int] = True
a : Optional[Any] = True
# The small UMT5 model needs higher percentages for CPU/MP tests
a : int = [0.8, 0.9]
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaModelTester(self )
@unittest.skip("""Test has a segmentation fault on torch 1.8.0""" )
def _snake_case ( self : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = UMTaModel(config_and_inputs[0] ).to(__UpperCamelCase )
with tempfile.TemporaryDirectory() as tmpdirname:
torch.onnx.export(
__UpperCamelCase , (config_and_inputs[1], config_and_inputs[3], config_and_inputs[2]) , f"""{tmpdirname}/t5_test.onnx""" , export_params=__UpperCamelCase , opset_version=9 , input_names=["""input_ids""", """decoder_input_ids"""] , )
@unittest.skipIf(torch_device == """cpu""" , """Cant do half precision""" )
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model_fpaa_forward(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = ["""encoder_attentions""", """decoder_attentions""", """cross_attentions"""]
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
_UpperCAmelCase = config_and_inputs[0]
_UpperCAmelCase = UMTaForConditionalGeneration(__UpperCamelCase ).eval()
model.to(__UpperCamelCase )
_UpperCAmelCase = {
"""head_mask""": torch.zeros(config.num_layers , config.num_heads , device=__UpperCamelCase ),
"""decoder_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
"""cross_attn_head_mask""": torch.zeros(config.num_decoder_layers , config.num_heads , device=__UpperCamelCase ),
}
for attn_name, (name, mask) in zip(__UpperCamelCase , head_masking.items() ):
_UpperCAmelCase = {name: mask}
# Explicitly pass decoder_head_mask as it is required from T5 model when head_mask specified
if name == "head_mask":
_UpperCAmelCase = torch.ones(
config.num_decoder_layers , config.num_heads , device=__UpperCamelCase )
_UpperCAmelCase = model.generate(
config_and_inputs[1]["""input_ids"""] , num_beams=1 , max_length=3 , output_attentions=__UpperCamelCase , return_dict_in_generate=__UpperCamelCase , **__UpperCamelCase , )
# We check the state of decoder_attentions and cross_attentions just from the last step
_UpperCAmelCase = out[attn_name] if attn_name == attention_names[0] else out[attn_name][-1]
self.assertEqual(sum([w.sum().item() for w in attn_weights] ) , 0.0 )
@unittest.skip("""Does not work on the tiny model as we keep hitting edge cases.""" )
def _snake_case ( self : Tuple ) ->List[Any]:
'''simple docstring'''
pass
@require_torch
@require_sentencepiece
@require_tokenizers
class a_ ( unittest.TestCase ):
@slow
@unittest.skip(
"""Unless we stop stripping left and right by default for all special tokens, the expected ids obtained here will not match the original ones. Wait for https://github.com/huggingface/transformers/pull/23909 to be merged""" )
def _snake_case ( self : Optional[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = UMTaForConditionalGeneration.from_pretrained("""google/umt5-small""" , return_dict=__UpperCamelCase ).to(__UpperCamelCase )
_UpperCAmelCase = AutoTokenizer.from_pretrained("""google/umt5-small""" , use_fast=__UpperCamelCase , legacy=__UpperCamelCase )
_UpperCAmelCase = [
"""Bonjour monsieur <extra_id_0> bien <extra_id_1>.""",
"""No se como puedo <extra_id_0>.""",
"""This is the reason why we <extra_id_0> them.""",
"""The <extra_id_0> walks in <extra_id_1>, seats""",
"""A <extra_id_0> walks into a bar and orders a <extra_id_1> with <extra_id_2> pinch of <extra_id_3>.""",
]
_UpperCAmelCase = tokenizer(__UpperCamelCase , return_tensors="""pt""" , padding=__UpperCamelCase ).input_ids
# fmt: off
_UpperCAmelCase = torch.tensor(
[
[ 3_85_30, 21_07_03, 25_62_99, 14_10, 25_62_98, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 8_26, 3_21, 6_71, 2_59_22, 25_62_99, 2_74, 1, 0,0, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 14_60, 3_39, 3_12, 1_90_14, 1_06_20, 7_58, 25_62_99, 23_55,2_74, 1, 0, 0, 0, 0, 0, 0,0, 0],
[ 5_17, 25_62_99, 1_48_69, 2_81, 3_01, 25_62_98, 2_75, 11_99_83,1, 0, 0, 0, 0, 0, 0, 0,0, 0],
[ 3_20, 25_62_99, 1_48_69, 2_81, 22_34, 2_89, 22_75, 3_33,6_13_91, 2_89, 25_62_98, 5_43, 25_62_97, 16_87_14, 3_29, 25_62_96,2_74, 1],
] )
# fmt: on
torch.testing.assert_allclose(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = model.generate(input_ids.to(__UpperCamelCase ) )
_UpperCAmelCase = [
"""<pad><extra_id_0> et<extra_id_1> [eod] <extra_id_2><extra_id_55>.. [eod] 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 💐 <extra_id_56>ajšietosto<extra_id_56>lleux<extra_id_19><extra_id_6>ajšie</s>""",
"""<pad><extra_id_0>.<extra_id_1>.,<0x0A>...spech <0x0A><extra_id_20> <extra_id_21></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> are not going to be a part of the world. We are not going to be a part of<extra_id_1> and<extra_id_2><0x0A><extra_id_48>.<extra_id_48></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0> door<extra_id_1>, the door<extra_id_2> 피해[/</s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
"""<pad><extra_id_0>nyone who<extra_id_1> drink<extra_id_2> a<extra_id_3> alcohol<extra_id_4> A<extra_id_5> A. This<extra_id_6> I<extra_id_7><extra_id_52><extra_id_53></s><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad><pad>""",
]
_UpperCAmelCase = tokenizer.batch_decode(__UpperCamelCase )
        self.assertEqual(__UpperCamelCase , __UpperCamelCase )
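# Hedged usage sketch of the checkpoint exercised above via the Auto classes;
# it downloads google/umt5-small, so treat it as illustrative only.
def _example_umt5_generate():
    from transformers import AutoModelForSeq2SeqLM
    tokenizer = AutoTokenizer.from_pretrained("""google/umt5-small""" )
    model = AutoModelForSeq2SeqLM.from_pretrained("""google/umt5-small""" )
    input_ids = tokenizer("""A <extra_id_0> walks into a bar.""" , return_tensors="""pt""" ).input_ids
    outputs = model.generate(input_ids , max_new_tokens=8 )
    return tokenizer.batch_decode(outputs )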
"""simple docstring"""
import unittest
from transformers import RoFormerTokenizer, RoFormerTokenizerFast
from transformers.testing_utils import require_rjieba, require_tokenizers
from ...test_tokenization_common import TokenizerTesterMixin
@require_rjieba
@require_tokenizers
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : Any = RoFormerTokenizer
a : str = RoFormerTokenizerFast
a : Tuple = True
a : List[Any] = True
def _snake_case ( self : Any ) ->List[Any]:
'''simple docstring'''
super().setUp()
def _snake_case ( self : Union[str, Any] , **__UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
return self.tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__UpperCamelCase )
def _snake_case ( self : Dict , **__UpperCamelCase : List[Any] ) ->Tuple:
'''simple docstring'''
return self.rust_tokenizer_class.from_pretrained("""junnyu/roformer_chinese_base""" , **__UpperCamelCase )
def _snake_case ( self : Tuple ) ->str:
'''simple docstring'''
_UpperCAmelCase = """永和服装饰品有限公司,今天天气非常好"""
_UpperCAmelCase = """永和 服装 饰品 有限公司 , 今 天 天 气 非常 好"""
return input_text, output_text
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self.get_tokenizer()
_UpperCAmelCase ,_UpperCAmelCase = self.get_chinese_input_output_texts()
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.get_rust_tokenizer()
_UpperCAmelCase ,_UpperCAmelCase = self.get_chinese_input_output_texts()
_UpperCAmelCase = tokenizer.tokenize(__UpperCamelCase )
self.assertListEqual(__UpperCamelCase , output_text.split() )
_UpperCAmelCase = tokens + [tokenizer.unk_token]
_UpperCAmelCase = [2_29_43, 2_13_32, 3_44_31, 4_59_04, 1_17, 3_06, 12_31, 12_31, 26_53, 3_39_94, 12_66, 1_00]
self.assertListEqual(tokenizer.convert_tokens_to_ids(__UpperCamelCase ) , __UpperCamelCase )
def _snake_case ( self : Any ) ->str:
'''simple docstring'''
pass
def _snake_case ( self : int ) ->List[Any]:
'''simple docstring'''
pass
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
        pass
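# Hedged usage sketch of the tokenizer under test; it fetches the
# junnyu/roformer_chinese_base vocabulary and requires the rjieba package.
def _example_roformer_tokenize():
    tokenizer = RoFormerTokenizer.from_pretrained("""junnyu/roformer_chinese_base""" )
    tokens = tokenizer.tokenize("""永和服装饰品有限公司""" )
    return tokens, tokenizer.convert_tokens_to_ids(tokens )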
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import ArrayaD, ClassLabel, Features, Image, Value
from datasets.features.features import ArrayaDExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class a_ ( _UpperCAmelCase ):
def _snake_case ( self : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] ) , type=pa.intaa() )
def _snake_case ( self : Optional[int] ) ->Tuple:
'''simple docstring'''
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""bool""" ) , type=Value("""int64""" ) ) )
def _snake_case ( self : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : str ) ->Dict:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=Value("""int64""" ) ) )
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
self.assertEqual(arr.type , pa.intaa() )
def _snake_case ( self : List[str] ) ->Any:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
def _snake_case ( self : List[Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
with self.assertRaises((TypeError, pa.lib.ArrowInvalid) ):
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , type=ArrayaD((1, 3) , """int64""" ) ) )
def _snake_case ( self : List[str] ) ->int:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence([[[1, 2, 3]]] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , ArrayaDExtensionType((1, 3) , """int64""" ) )
def _snake_case ( self : Optional[int] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=ArrayaD((1, 3) , """int64""" ) ) )
self.assertEqual(arr.type , pa.string() )
@require_pil
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
import PIL.Image
_UpperCAmelCase = PIL.Image.fromarray(np.arange(10 , dtype=np.uinta ).reshape(2 , 5 ) )
with patch(
"""datasets.arrow_writer.cast_to_python_objects""" , side_effect=__UpperCamelCase ) as mock_cast_to_python_objects:
_UpperCAmelCase = pa.array(TypedSequence([{"""path""": None, """bytes""": b"""image_bytes"""}, pil_image] , type=Image() ) )
_UpperCAmelCase ,_UpperCAmelCase = mock_cast_to_python_objects.call_args_list[-1]
self.assertIn("""optimize_list_casting""" , __UpperCamelCase )
self.assertFalse(kwargs["""optimize_list_casting"""] )
def _check_output( output , expected_num_chunks ) -> Any:
    """simple docstring"""
    stream = pa.BufferReader(output ) if isinstance(output , pa.Buffer ) else pa.memory_map(output )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    assert len(pa_table.to_batches() ) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
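# Compact in-memory round trip of the pattern the helper above verifies:
# write two rows, finalize, and read them back as one record batch.
def _example_round_trip():
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        writer.finalize()
    _check_output(output.getvalue() , expected_num_chunks=1 )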
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_with_features( ) -> Union[str, Any]:
    """simple docstring"""
    output = pa.BufferOutputStream()
    features = Features({"""labels""": ClassLabel(names=["""neg""", """pos"""] )} )
    with ArrowWriter(stream=output , features=features ) as writer:
        writer.write({"""labels""": 0} )
        writer.write({"""labels""": 1} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue() )
    f = pa.ipc.open_stream(stream )
    pa_table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=[1, 2] )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
with pytest.raises(_A ):
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1_0 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=1_0 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
@pytest.mark.parametrize("""writer_batch_size""" , [None, 2, 1_0] )
def _UpperCamelCase ( _A ) -> Tuple:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
with ArrowWriter(
stream=_A , writer_batch_size=_A , hash_salt="""split_name""" , check_duplicates=_A , ) as writer:
writer.write({"""col_1""": """foo""", """col_2""": 1} , key=1 )
writer.write({"""col_1""": """bar""", """col_2""": 2} , key=2 )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
writer.write_batch({"""col_1""": [], """col_2""": []} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_table(pa.Table.from_pydict({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
@pytest.mark.parametrize("""writer_batch_size""" , [None, 1, 1_0] )
@pytest.mark.parametrize(
"""fields""" , [None, {"""col_1""": pa.string(), """col_2""": pa.intaa()}, {"""col_1""": pa.string(), """col_2""": pa.intaa()}] )
def _UpperCamelCase ( _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = pa.BufferOutputStream()
_UpperCAmelCase = pa.schema(_A ) if fields else None
with ArrowWriter(stream=_A , schema=_A , writer_batch_size=_A ) as writer:
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""foo"""], """col_2""": [1]} ) )
writer.write_row(pa.Table.from_pydict({"""col_1""": ["""bar"""], """col_2""": [2]} ) )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
if not fields:
_UpperCAmelCase = {"""col_1""": pa.string(), """col_2""": pa.intaa()}
assert writer._schema == pa.schema(_A , metadata=writer._schema.metadata )
_check_output(output.getvalue() , expected_num_chunks=num_examples if writer_batch_size == 1 else 1 )
def test_write_file( ) -> str:
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"""col_1""": pa.string(), """col_2""": pa.int64()}
        output = os.path.join(tmp_dir , """test.arrow""" )
        with ArrowWriter(path=output , schema=pa.schema(fields ) ) as writer:
            writer.write_batch({"""col_1""": ["""foo""", """bar"""], """col_2""": [1, 2]} )
            num_examples , num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields , metadata=writer._schema.metadata )
        _check_output(output , 1 )
def get_base_dtype( arr_type ) -> int:
    """simple docstring"""
    if pa.types.is_list(arr_type ):
        return get_base_dtype(arr_type.value_type )
else:
return arr_type
def change_first_primitive_element_in_list( lst , value ) -> Any:
    """simple docstring"""
    if isinstance(lst[0] , list ):
        change_first_primitive_element_in_list(lst[0] , value )
    else:
        lst[0] = value
@pytest.mark.parametrize("""optimized_int_type, expected_dtype""" , [(None, pa.intaa()), (Value("""int32""" ), pa.intaa())] )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = pa.array(TypedSequence(_A , optimized_int_type=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
@pytest.mark.parametrize(
"""col, expected_dtype""" , [
("""attention_mask""", pa.inta()),
("""special_tokens_mask""", pa.inta()),
("""token_type_ids""", pa.inta()),
("""input_ids""", pa.intaa()),
("""other""", pa.intaa()),
] , )
@pytest.mark.parametrize("""sequence""" , [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]] )
def _UpperCamelCase ( _A , _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == expected_dtype
# not in range
if col != "other":
# avoids errors due to in-place modifications
_UpperCAmelCase = copy.deepcopy(_A )
_UpperCAmelCase = np.iinfo(expected_dtype.to_pandas_dtype() ).max + 1
change_first_primitive_element_in_list(_A , _A )
_UpperCAmelCase = pa.array(OptimizedTypedSequence(_A , col=_A ) )
assert get_base_dtype(arr.type ) == pa.intaa()
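# A tiny demonstration of the `type` vs `try_type` contract exercised in the
# tests above: `try_type` silently falls back when the cast fails, while
# `type` raises.
def _example_try_type_fallback():
    casted = pa.array(TypedSequence([1, 2, 3] , try_type=Value("""int32""" ) ) )
    fallback = pa.array(TypedSequence(["""foo""", """bar"""] , try_type=Value("""int64""" ) ) )
    assert casted.type == pa.int32()
    assert fallback.type == pa.string()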
@pytest.mark.parametrize("""raise_exception""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = str(tmp_path / """dataset-train.arrow""" )
try:
with ArrowWriter(path=_A ) as writer:
if raise_exception:
raise pa.lib.ArrowInvalid()
else:
writer.stream.close()
except pa.lib.ArrowInvalid:
pass
finally:
assert writer.stream.closed
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = """mock://dataset-train.arrow"""
with ArrowWriter(path=_A , storage_options=mockfs.storage_options ) as writer:
assert isinstance(writer._fs , type(_A ) )
assert writer._fs.storage_options == mockfs.storage_options
writer.write({"""col_1""": """foo""", """col_2""": 1} )
writer.write({"""col_1""": """bar""", """col_2""": 2} )
_UpperCAmelCase ,_UpperCAmelCase = writer.finalize()
assert num_examples == 2
assert num_bytes > 0
assert mockfs.exists(_A )
def test_parquet_write( ) -> Dict:
    """simple docstring"""
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output ) as writer:
        writer.write({"""col_1""": """foo""", """col_2""": 1} )
        writer.write({"""col_1""": """bar""", """col_2""": 2} )
        num_examples , num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue() )
    pa_table = pq.read_table(stream )
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
@require_pil
@pytest.mark.parametrize("""embed_local_files""" , [False, True] )
def _UpperCamelCase ( _A , _A ) -> Any:
"""simple docstring"""
import PIL.Image
_UpperCAmelCase = str(tmp_path / """test_image_rgb.jpg""" )
PIL.Image.fromarray(np.zeros((5, 5) , dtype=np.uinta ) ).save(_A , format="""png""" )
_UpperCAmelCase = pa.BufferOutputStream()
with ParquetWriter(
stream=_A , features=Features({"""image""": Image()} ) , embed_local_files=_A ) as writer:
writer.write({"""image""": image_path} )
writer.finalize()
_UpperCAmelCase = pa.BufferReader(output.getvalue() )
_UpperCAmelCase = pq.read_table(_A )
_UpperCAmelCase = pa_table.to_pydict()
if embed_local_files:
assert isinstance(out["""image"""][0]["""path"""] , _A )
with open(_A , """rb""" ) as f:
assert out["image"][0]["bytes"] == f.read()
else:
assert out["image"][0]["path"] == image_path
assert out["image"][0]["bytes"] is None
def test_always_nullable( ) -> int:
    """simple docstring"""
    schema = pa.schema([pa.field("""col_1""" , pa.string() , nullable=False )] )
    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output ) as writer:
        writer._build_writer(inferred_schema=schema )
    assert writer._schema == pa.schema([pa.field("""col_1""" , pa.string() )] )
"""simple docstring"""
import os
import string
import sys
ARROW_KEY_FLAG = 1 << 8
KEYMAP = {
'''tab''': ord('''\t'''),
'''newline''': ord('''\r'''),
'''esc''': 2_7,
'''up''': 6_5 + ARROW_KEY_FLAG,
'''down''': 6_6 + ARROW_KEY_FLAG,
'''right''': 6_7 + ARROW_KEY_FLAG,
'''left''': 6_8 + ARROW_KEY_FLAG,
'''mod_int''': 9_1,
'''undefined''': sys.maxsize,
'''interrupt''': 3,
'''insert''': 5_0,
'''delete''': 5_1,
'''pg_up''': 5_3,
'''pg_down''': 5_4,
}
KEYMAP['''arrow_begin'''] = KEYMAP['''up''']
KEYMAP['''arrow_end'''] = KEYMAP['''left''']
if sys.platform == "win32":
    WIN_CH_BUFFER = []
    WIN_KEYMAP = {
B'''\xe0H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\x00H''': KEYMAP['''up'''] - ARROW_KEY_FLAG,
B'''\xe0P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\x00P''': KEYMAP['''down'''] - ARROW_KEY_FLAG,
B'''\xe0M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\x00M''': KEYMAP['''right'''] - ARROW_KEY_FLAG,
B'''\xe0K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
B'''\x00K''': KEYMAP['''left'''] - ARROW_KEY_FLAG,
}
for i in range(1_0):
    KEYMAP[str(i)] = ord(str(i))
def get_raw_chars( ) -> List[Any]:
"""simple docstring"""
if os.name == "nt":
import msvcrt
_UpperCAmelCase = """mbcs"""
# Flush the keyboard buffer
while msvcrt.kbhit():
msvcrt.getch()
if len(_UpperCamelCase ) == 0:
# Read the keystroke
_UpperCAmelCase = msvcrt.getch()
# If it is a prefix char, get second part
if ch in (b"\x00", b"\xe0"):
_UpperCAmelCase = ch + msvcrt.getch()
# Translate actual Win chars to bullet char types
try:
_UpperCAmelCase = chr(WIN_KEYMAP[cha] )
WIN_CH_BUFFER.append(chr(KEYMAP["""mod_int"""] ) )
WIN_CH_BUFFER.append(_UpperCamelCase )
if ord(_UpperCamelCase ) in (
KEYMAP["insert"] - 1 << 9,
KEYMAP["delete"] - 1 << 9,
KEYMAP["pg_up"] - 1 << 9,
KEYMAP["pg_down"] - 1 << 9,
):
WIN_CH_BUFFER.append(chr(1_2_6 ) )
_UpperCAmelCase = chr(KEYMAP["""esc"""] )
except KeyError:
_UpperCAmelCase = cha[1]
else:
_UpperCAmelCase = ch.decode(_UpperCamelCase )
else:
_UpperCAmelCase = WIN_CH_BUFFER.pop(0 )
elif os.name == "posix":
import termios
import tty
        fd = sys.stdin.fileno()
        old_settings = termios.tcgetattr(fd )
        try:
            tty.setraw(fd )
            ch = sys.stdin.read(1 )
        finally:
            termios.tcsetattr(fd , termios.TCSADRAIN , old_settings )
return ch
def get_character( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = get_raw_chars()
if ord(_UpperCamelCase ) in [KEYMAP["interrupt"], KEYMAP["newline"]]:
return char
elif ord(_UpperCamelCase ) == KEYMAP["esc"]:
_UpperCAmelCase = get_raw_chars()
if ord(_UpperCamelCase ) == KEYMAP["mod_int"]:
_UpperCAmelCase = get_raw_chars()
if ord(_UpperCamelCase ) >= KEYMAP["arrow_begin"] - ARROW_KEY_FLAG and ord(_UpperCamelCase ) <= KEYMAP["arrow_end"] - ARROW_KEY_FLAG:
return chr(ord(_UpperCamelCase ) + ARROW_KEY_FLAG )
else:
return KEYMAP["undefined"]
else:
return get_raw_chars()
else:
if char in string.printable:
return char
else:
return KEYMAP["undefined"] | 715 |
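

# Minimal usage sketch (hypothetical; not part of the original module): poll a single
# keypress and check whether it was a vertical arrow.
#
#     key = get_character()
#     if isinstance(key, str) and ord(key) in (KEYMAP["up"], KEYMAP["down"]):
#         print("vertical arrow pressed")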
"""simple docstring"""
# Lint as: python3
import sys
from collections.abc import Mapping
from typing import TYPE_CHECKING, Dict, Optional
import numpy as np
import pyarrow as pa
from .. import config
from ..utils.logging import get_logger
from ..utils.py_utils import map_nested
from .formatting import TensorFormatter
if TYPE_CHECKING:
import jax
import jaxlib
logger = get_logger()

DEVICE_MAPPING: Optional[dict] = None
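# `DEVICE_MAPPING` is filled lazily (see `_map_devices_to_str`) with a mapping from a
# device's string identifier to the actual `jaxlib` Device object.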
class JaxFormatter(TensorFormatter[Mapping, "jax.Array", Mapping]):
    def __init__(self, features=None, device=None, **jnp_array_kwargs):
        super().__init__(features=features)
import jax
from jaxlib.xla_client import Device
        if isinstance(device, Device):
            raise ValueError(
                f"Expected {device} to be a `str` not {type(device)}, as `jaxlib.xla_extension.Device` "
                "is not serializable neither with `pickle` nor with `dill`. Instead you can surround "
                "the device with `str()` to get its string identifier that will be internally mapped "
                "to the actual `jaxlib.xla_extension.Device`."
            )
        self.device = device if isinstance(device, str) else str(jax.devices()[0])
# using global variable since `jaxlib.xla_extension.Device` is not serializable neither
# with `pickle` nor with `dill`, so we need to use a global variable instead
global DEVICE_MAPPING
if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()
if self.device not in list(DEVICE_MAPPING.keys() ):
logger.warning(
f"""Device with string identifier {self.device} not listed among the available """
f"""devices: {list(DEVICE_MAPPING.keys() )}, so falling back to the default """
f"""device: {str(jax.devices()[0] )}.""" )
            self.device = str(jax.devices()[0])
        self.jnp_array_kwargs = jnp_array_kwargs
@staticmethod
    def _map_devices_to_str() -> Dict[str, "jaxlib.xla_extension.Device"]:
        import jax

        return {str(device): device for device in jax.devices()}
    def _consolidate(self, column):
        import jax
        import jax.numpy as jnp

        if isinstance(column, list) and column:
            if all(
                isinstance(x, jax.Array) and x.shape == column[0].shape and x.dtype == column[0].dtype for x in column
            ):
                return jnp.stack(column, axis=0)
        return column
    def _tensorize(self, value):
        import jax
        import jax.numpy as jnp

        if isinstance(value, (str, bytes, type(None))):
            return value
        elif isinstance(value, (np.character, np.ndarray)) and np.issubdtype(value.dtype, np.character):
            return value.tolist()

        default_dtype = {}
        if isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.integer):
            # the default int precision depends on the jax config
            # see https://jax.readthedocs.io/en/latest/notebooks/Common_Gotchas_in_JAX.html#double-64bit-precision
            if jax.config.jax_enable_x64:
                default_dtype = {"dtype": jnp.int64}
            else:
                default_dtype = {"dtype": jnp.int32}
        elif isinstance(value, (np.number, np.ndarray)) and np.issubdtype(value.dtype, np.floating):
            default_dtype = {"dtype": jnp.float32}
        elif config.PIL_AVAILABLE and "PIL" in sys.modules:
            import PIL.Image

            if isinstance(value, PIL.Image.Image):
                value = np.asarray(value)

        # using global variable since `jaxlib.xla_extension.Device` is not serializable neither
        # with `pickle` nor with `dill`, so we need to use a global variable instead
        global DEVICE_MAPPING
        if DEVICE_MAPPING is None:
            DEVICE_MAPPING = self._map_devices_to_str()

        with jax.default_device(DEVICE_MAPPING[self.device]):
            # calling jnp.array on a np.ndarray does copy the data
            # see https://github.com/google/jax/issues/4486
            return jnp.array(value, **{**default_dtype, **self.jnp_array_kwargs})
    def _recursive_tensorize(self, data_struct):
        import jax

        # support for torch, tf, jax etc.
        if config.TORCH_AVAILABLE and "torch" in sys.modules:
            import torch

            if isinstance(data_struct, torch.Tensor):
                return self._tensorize(data_struct.detach().cpu().numpy()[()])
        if hasattr(data_struct, "__array__") and not isinstance(data_struct, jax.Array):
            data_struct = data_struct.__array__()
        # support for nested types like struct of list of struct
        if isinstance(data_struct, np.ndarray):
            if data_struct.dtype == object:  # jax arrays cannot be instantiated from an array of objects
                return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        elif isinstance(data_struct, (list, tuple)):
            return self._consolidate([self.recursive_tensorize(substruct) for substruct in data_struct])
        return self._tensorize(data_struct)

    def recursive_tensorize(self, data_struct: dict):
        return map_nested(self._recursive_tensorize, data_struct, map_list=False)
    def format_row(self, pa_table: pa.Table) -> Mapping:
        row = self.numpy_arrow_extractor().extract_row(pa_table)
        row = self.python_features_decoder.decode_row(row)
        return self.recursive_tensorize(row)

    def format_column(self, pa_table: pa.Table) -> "jax.Array":
        column = self.numpy_arrow_extractor().extract_column(pa_table)
        column = self.python_features_decoder.decode_column(column, pa_table.column_names[0])
        column = self.recursive_tensorize(column)
        column = self._consolidate(column)
        return column

    def format_batch(self, pa_table: pa.Table) -> Mapping:
        batch = self.numpy_arrow_extractor().extract_batch(pa_table)
        batch = self.python_features_decoder.decode_batch(batch)
        batch = self.recursive_tensorize(batch)
        for column_name in batch:
            batch[column_name] = self._consolidate(batch[column_name])
        return batch
"""simple docstring"""
from collections import OrderedDict
from typing import Mapping
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)

ROBERTA_PRELAYERNORM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
"""andreasmadsen/efficient_mlm_m0.40""": (
"""https://huggingface.co/andreasmadsen/efficient_mlm_m0.40/resolve/main/config.json"""
),
}
class RobertaPreLayerNormConfig(PretrainedConfig):
    model_type = "roberta-prelayernorm"

    def __init__(
        self,
        vocab_size=50_265,
        hidden_size=768,
        num_hidden_layers=12,
        num_attention_heads=12,
        intermediate_size=3_072,
        hidden_act="gelu",
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=2,
        initializer_range=0.02,
        layer_norm_eps=1e-12,
        pad_token_id=1,
        bos_token_id=0,
        eos_token_id=2,
        position_embedding_type="absolute",
        use_cache=True,
        classifier_dropout=None,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.classifier_dropout = classifier_dropout
class RobertaPreLayerNormOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        if self.task == "multiple-choice":
            dynamic_axis = {0: "batch", 1: "choice", 2: "sequence"}
        else:
            dynamic_axis = {0: "batch", 1: "sequence"}
        return OrderedDict(
            [
                ("input_ids", dynamic_axis),
                ("attention_mask", dynamic_axis),
            ]
        )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
_import_structure = {
    "configuration_pegasus_x": ["PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP", "PegasusXConfig"],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_pegasus_x"] = [
'''PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''PegasusXForConditionalGeneration''',
'''PegasusXModel''',
'''PegasusXPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_pegasus_x import PEGASUS_X_PRETRAINED_CONFIG_ARCHIVE_MAP, PegasusXConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_pegasus_x import (
PEGASUS_X_PRETRAINED_MODEL_ARCHIVE_LIST,
PegasusXForConditionalGeneration,
PegasusXModel,
PegasusXPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
"""simple docstring"""
import gc
import tempfile
import unittest
import numpy as np
import torch
from diffusers import VersatileDiffusionPipeline
from diffusers.utils.testing_utils import load_image, nightly, require_torch_gpu, torch_device
torch.backends.cuda.matmul.allow_tf32 = False
class VersatileDiffusionMegaPipelineFastTests(unittest.TestCase):
    pass
@nightly
@require_torch_gpu
class VersatileDiffusionMegaPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()
    def test_from_save_pretrained(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        with tempfile.TemporaryDirectory() as tmpdirname:
            pipe.save_pretrained(tmpdirname)
            pipe = VersatileDiffusionPipeline.from_pretrained(tmpdirname, torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = generator.manual_seed(0)
        new_image = pipe.dual_guided(
            prompt="first prompt", image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=2, output_type="numpy"
        ).images

        assert np.abs(image - new_image).sum() < 1e-5, "Models don't have the same forward pass"
    def test_inference_dual_guided_then_text_to_image(self):
        pipe = VersatileDiffusionPipeline.from_pretrained("shi-labs/versatile-diffusion", torch_dtype=torch.float16)
        pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        prompt = "cyberpunk 2077"
        init_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/versatile_diffusion/benz.jpg"
        )
        generator = torch.manual_seed(0)
        image = pipe.dual_guided(
            prompt=prompt, image=init_image, text_to_image_strength=0.75, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.1448, 0.1619, 0.1741, 0.1086, 0.1147, 0.1128, 0.1199, 0.1165, 0.1001])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        prompt = "A painting of a squirrel eating a burger "
        generator = torch.manual_seed(0)
        image = pipe.text_to_image(
            prompt=prompt, generator=generator, guidance_scale=7.5, num_inference_steps=50, output_type="numpy"
        ).images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3367, 0.3169, 0.2656, 0.3870, 0.4790, 0.3796, 0.4009, 0.4878, 0.4778])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1

        image = pipe.image_variation(init_image, generator=generator, output_type="numpy").images
        image_slice = image[0, 253:256, 253:256, -1]
        assert image.shape == (1, 512, 512, 3)
        expected_slice = np.array([0.3076, 0.3123, 0.3284, 0.3782, 0.3770, 0.3894, 0.4297, 0.4331, 0.4456])
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-1
"""simple docstring"""
import collections
import json
import math
import os
import re
import time
from fnmatch import fnmatch
from typing import Dict
import requests
from slack_sdk import WebClient
client = WebClient(token=os.environ["CI_SLACK_BOT_TOKEN"])
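# Slack client used to post the daily doc-test report; the CI job must export
# CI_SLACK_BOT_TOKEN in the environment for this script to run.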
def handle_test_results(test_results):
    expressions = test_results.split(" ")
    failed = 0
    success = 0
    # When the output is short enough, the output is surrounded by = signs: "== OUTPUT =="
    # When it is too long, those signs are not present.
    time_spent = expressions[-2] if "=" in expressions[-1] else expressions[-1]
    for i, expression in enumerate(expressions):
        if "failed" in expression:
            failed += int(expressions[i - 1])
        if "passed" in expression:
            success += int(expressions[i - 1])
    return failed, success, time_spent
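

# Example on a hypothetical pytest summary line:
#     handle_test_results("== 2 failed, 10 passed in 3:05:22 ==") -> (2, 10, "3:05:22")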
def extract_first_line_failure(failures_short_lines):
    failures = {}
    file = None
    in_error = False
    for line in failures_short_lines.split("\n"):
        if re.search(r"_ \[doctest\]", line):
            in_error = True
            file = line.split(" ")[2]
        elif in_error and not line.split(" ")[0].isdigit():
            failures[file] = line
            in_error = False
    return failures
class Message:
    def __init__(self, title: str, doc_test_results: Dict):
        self.title = title
        self._time_spent = doc_test_results["time_spent"].split(",")[0]
        self.n_success = doc_test_results["success"]
        self.n_failures = doc_test_results["failures"]
        self.n_tests = self.n_success + self.n_failures
        # Failures and success of the modeling tests
        self.doc_test_results = doc_test_results
@property
    def time(self) -> str:
        time_spent = [self._time_spent]
        total_secs = 0
        for time in time_spent:
            time_parts = time.split(":")
            # Time can be formatted as xx:xx:xx, as .xx, or as x.xx if the time spent was less than a minute.
            if len(time_parts) == 1:
                time_parts = [0, 0, time_parts[0]]
            hours, minutes, seconds = int(time_parts[0]), int(time_parts[1]), float(time_parts[2])
            total_secs += hours * 3600 + minutes * 60 + seconds
        hours, minutes, seconds = total_secs // 3600, (total_secs % 3600) // 60, total_secs % 60
        return f"{int(hours)}h{int(minutes)}m{int(seconds)}s"
@property
    def header(self) -> Dict:
        return {"type": "header", "text": {"type": "plain_text", "text": self.title}}
@property
    def no_failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": f"""🌞 There were no failures: all {self.n_tests} tests passed. The suite ran in {self.time}.""",
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
    def failures(self) -> Dict:
return {
"type": "section",
"text": {
"type": "plain_text",
"text": (
f"""There were {self.n_failures} failures, out of {self.n_tests} tests.\nThe suite ran in"""
f""" {self.time}."""
),
"emoji": True,
},
"accessory": {
"type": "button",
"text": {"type": "plain_text", "text": "Check Action results", "emoji": True},
"url": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
@property
    def category_failures(self) -> Dict:
        line_length = 40
        category_failures = {k: v["failed"] for k, v in doc_test_results.items() if isinstance(v, dict)}

        report = ""
        for category, failures in category_failures.items():
            if len(failures) == 0:
                continue
            if report != "":
                report += "\n\n"
            report += f"*{category} failures*:".ljust(line_length // 2).rjust(line_length // 2) + "\n"
            report += "`"
            report += "`\n`".join(failures)
            report += "`"
return {
"type": "section",
"text": {
"type": "mrkdwn",
"text": f"""The following examples had failures:\n\n\n{report}\n""",
},
}
@property
    def payload(self) -> str:
        blocks = [self.header]
        if self.n_failures > 0:
            blocks.append(self.failures)
        if self.n_failures > 0:
            blocks.extend([self.category_failures])
        if self.n_failures == 0:
            blocks.append(self.no_failures)
        return json.dumps(blocks)
@staticmethod
    def error_out():
        payload = [
{
"""type""": """section""",
"""text""": {
"""type""": """plain_text""",
"""text""": """There was an issue running the tests.""",
},
"""accessory""": {
"""type""": """button""",
"""text""": {"""type""": """plain_text""", """text""": """Check Action results""", """emoji""": True},
"""url""": f"""https://github.com/huggingface/transformers/actions/runs/{os.environ["GITHUB_RUN_ID"]}""",
},
}
]
        print("Sending the following payload")
        print(json.dumps({"blocks": payload}))
        client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], text="There was an issue running the tests.", blocks=payload
        )
    def post(self):
        print("Sending the following payload")
        print(json.dumps({"blocks": json.loads(self.payload)}))
        text = f"{self.n_failures} failures out of {self.n_tests} tests," if self.n_failures else "All tests passed."
        self.thread_ts = client.chat_postMessage(
            channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"], blocks=self.payload, text=text
        )
    def get_reply_blocks(self, job_name, job_link, failures, text):
        failures_text = ""
        for key, value in failures.items():
            value = value[:200] + " [Truncated]" if len(value) > 250 else value
            failures_text += f"*{key}*\n_{value}_\n\n"
        title = job_name
        content = {"type": "section", "text": {"type": "mrkdwn", "text": text}}
        if job_link is not None:
            content["accessory"] = {
                "type": "button",
                "text": {"type": "plain_text", "text": "GitHub Action job", "emoji": True},
                "url": job_link,
            }
return [
{"type": "header", "text": {"type": "plain_text", "text": title.upper(), "emoji": True}},
content,
{"type": "section", "text": {"type": "mrkdwn", "text": failures_text}},
]
    def post_reply(self):
        if self.thread_ts is None:
            raise ValueError("Can only post reply if a post has been made.")
        job_link = self.doc_test_results.pop("job_link")
        self.doc_test_results.pop("failures")
        self.doc_test_results.pop("success")
        self.doc_test_results.pop("time_spent")

        sorted_dict = sorted(self.doc_test_results.items(), key=lambda t: t[0])
        for job, job_result in sorted_dict:
            if len(job_result["failures"]):
                text = f"*Num failures* :{len(job_result['failed'])} \n"
                failures = job_result["failures"]
                blocks = self.get_reply_blocks(job, job_link, failures, text=text)

                print("Sending the following reply")
                print(json.dumps({"blocks": blocks}))
                client.chat_postMessage(
                    channel=os.environ["CI_SLACK_CHANNEL_ID_DAILY"],
                    text=f"Results for {job}",
                    blocks=blocks,
                    thread_ts=self.thread_ts["ts"],
                )
                time.sleep(1)
def get_job_links():
    run_id = os.environ["GITHUB_RUN_ID"]
    url = f"https://api.github.com/repos/huggingface/transformers/actions/runs/{run_id}/jobs?per_page=100"
    result = requests.get(url).json()
    jobs = {}
    try:
        jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        pages_to_iterate_over = math.ceil((result["total_count"] - 100) / 100)
        for i in range(pages_to_iterate_over):
            result = requests.get(url + f"&page={i + 2}").json()
            jobs.update({job["name"]: job["html_url"] for job in result["jobs"]})
        return jobs
    except Exception as e:
        print("Unknown error, could not fetch links.", e)
    return {}
def retrieve_artifact(name: str):
    _artifact = {}
    if os.path.exists(name):
        files = os.listdir(name)
        for file in files:
            try:
                with open(os.path.join(name, file), encoding="utf-8") as f:
                    _artifact[file.split(".")[0]] = f.read()
            except UnicodeDecodeError as e:
                raise ValueError(f"Could not open {os.path.join(name, file)}.") from e
    return _artifact
def retrieve_available_artifacts():
    class Artifact:
        def __init__(self, name: str):
            self.name = name
            self.paths = []

        def __str__(self):
            return self.name

        def add_path(self, path: str):
            self.paths.append({"name": self.name, "path": path})

    _available_artifacts = {}
    directories = filter(os.path.isdir, os.listdir())
    for directory in directories:
        artifact_name = directory
        if artifact_name not in _available_artifacts:
            _available_artifacts[artifact_name] = Artifact(artifact_name)
        _available_artifacts[artifact_name].add_path(directory)
    return _available_artifacts
if __name__ == "__main__":
    github_actions_job_links = get_job_links()
    available_artifacts = retrieve_available_artifacts()

    docs = collections.OrderedDict(
        [
            ("*.py", "API Examples"),
            ("*.md", "MD Examples"),
        ]
    )

    # This dict will contain all the information relative to each doc test category:
    # - failed: list of failed tests
    # - failures: dict in the format 'test': 'error_message'
    doc_test_results = {
        v: {
            "failed": [],
            "failures": {},
        }
        for v in docs.values()
    }

    # Link to the GitHub Action job
    doc_test_results["job_link"] = github_actions_job_links.get("run_doctests")

    artifact_path = available_artifacts["doc_tests_gpu_test_reports"].paths[0]
    artifact = retrieve_artifact(artifact_path["name"])
    if "stats" in artifact:
        failed, success, time_spent = handle_test_results(artifact["stats"])
        doc_test_results["failures"] = failed
        doc_test_results["success"] = success
        doc_test_results["time_spent"] = time_spent[1:-1] + ", "

        all_failures = extract_first_line_failure(artifact["failures_short"])
        for line in artifact["summary_short"].split("\n"):
            if re.search("FAILED", line):
                line = line.replace("FAILED ", "")
                line = line.split()[0].replace("\n", "")

                if "::" in line:
                    file_path, test = line.split("::")
                else:
                    file_path, test = line, line

                for file_regex in docs.keys():
                    if fnmatch(file_path, file_regex):
                        category = docs[file_regex]
                        doc_test_results[category]["failed"].append(test)

                        failure = all_failures[test] if test in all_failures else "N/A"
                        doc_test_results[category]["failures"][test] = failure
                        break

    message = Message("🤗 Results of the doc tests.", doc_test_results)
    message.post()
    message.post_reply()
import logging
import random
import ray
from transformers import RagConfig, RagRetriever, RagTokenizer
from transformers.models.rag.retrieval_rag import CustomHFIndex
a : str = logging.getLogger(__name__)
class RayRetriever:
    def __init__(self):
        self.initialized = False
    def create_rag_retriever(self, config, question_encoder_tokenizer, generator_tokenizer, index):
        if not self.initialized:
            self.retriever = RagRetriever(
                config,
                question_encoder_tokenizer=question_encoder_tokenizer,
                generator_tokenizer=generator_tokenizer,
                index=index,
                init_retrieval=False,
            )
            self.initialized = True
    def init_retrieval(self):
        self.retriever.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        doc_ids, retrieved_doc_embeds = self.retriever._main_retrieve(question_hidden_states, n_docs)
        return doc_ids, retrieved_doc_embeds
class RagRayDistributedRetriever(RagRetriever):
    def __init__(self, config, question_encoder_tokenizer, generator_tokenizer, retrieval_workers, index=None):
        if index is not None and index.is_initialized() and len(retrieval_workers) > 0:
            raise ValueError(
raise ValueError(
"""When using Ray for distributed fine-tuning, """
"""you\'ll need to provide the paths instead, """
"""as the dataset and the index are loaded """
"""separately. More info in examples/rag/use_own_knowledge_dataset.py """ )
        super().__init__(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            index=index,
            init_retrieval=False,
        )
        self.retrieval_workers = retrieval_workers
        if len(self.retrieval_workers) > 0:
            ray.get(
                [
                    worker.create_rag_retriever.remote(config, question_encoder_tokenizer, generator_tokenizer, index)
                    for worker in self.retrieval_workers
                ]
            )
    def init_retrieval(self):
        logger.info("initializing retrieval")
        if len(self.retrieval_workers) > 0:
            ray.get([worker.init_retrieval.remote() for worker in self.retrieval_workers])
        else:
            # Non-distributed training. Load index into this same process.
            self.index.init_index()

    def retrieve(self, question_hidden_states, n_docs):
        if len(self.retrieval_workers) > 0:
            # Select a random retrieval actor.
            random_worker = self.retrieval_workers[random.randint(0, len(self.retrieval_workers) - 1)]
            doc_ids, retrieved_doc_embeds = ray.get(random_worker.retrieve.remote(question_hidden_states, n_docs))
        else:
            doc_ids, retrieved_doc_embeds = self._main_retrieve(question_hidden_states, n_docs)
        return retrieved_doc_embeds, doc_ids, self.index.get_doc_dicts(doc_ids)
    @classmethod
    def get_tokenizers(cls, retriever_name_or_path, indexed_dataset=None, **kwargs):
        return super(RagRayDistributedRetriever, cls).get_tokenizers(retriever_name_or_path, indexed_dataset, **kwargs)

    @classmethod
    def from_pretrained(cls, retriever_name_or_path, actor_handles, indexed_dataset=None, **kwargs):
        config = kwargs.pop("config", None) or RagConfig.from_pretrained(retriever_name_or_path, **kwargs)
        rag_tokenizer = RagTokenizer.from_pretrained(retriever_name_or_path, config=config)
        question_encoder_tokenizer = rag_tokenizer.question_encoder
        generator_tokenizer = rag_tokenizer.generator
        if indexed_dataset is not None:
            config.index_name = "custom"
            index = CustomHFIndex(config.retrieval_vector_size, indexed_dataset)
        else:
            index = cls._build_index(config)
        return cls(
            config,
            question_encoder_tokenizer=question_encoder_tokenizer,
            generator_tokenizer=generator_tokenizer,
            retrieval_workers=actor_handles,
            index=index,
        )
"""simple docstring"""
import asyncio
import os
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
from copy import deepcopy
from distutils.util import strtobool
from enum import Enum
from importlib.util import find_spec
from pathlib import Path
from unittest.mock import patch
import pyarrow as pa
import pytest
import requests
from packaging import version
from datasets import config
if config.PY_VERSION < version.parse('''3.8'''):
import importlib_metadata
else:
import importlib.metadata as importlib_metadata
def parse_flag_from_env(key, default=False):
    try:
        value = os.environ[key]
    except KeyError:
        # KEY isn't set, default to `default`.
        _value = default
    else:
        # KEY is set, convert it to True or False.
        try:
            _value = strtobool(value)
        except ValueError:
            # More values are supported, but let's keep the message simple.
            raise ValueError(f"If set, {key} must be yes or no.")
    return _value
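

# Example (hypothetical invocation): running `RUN_SLOW=yes pytest tests/` makes
# `parse_flag_from_env("RUN_SLOW", default=False)` below return a truthy value.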
_run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
_run_remote_tests = parse_flag_from_env("RUN_REMOTE", default=False)
_run_local_tests = parse_flag_from_env("RUN_LOCAL", default=True)
_run_packaged_tests = parse_flag_from_env("RUN_PACKAGED", default=True)
# Compression
require_lz4 = pytest.mark.skipif(not config.LZ4_AVAILABLE, reason="test requires lz4")
require_py7zr = pytest.mark.skipif(not config.PY7ZR_AVAILABLE, reason="test requires py7zr")
require_zstandard = pytest.mark.skipif(not config.ZSTANDARD_AVAILABLE, reason="test requires zstandard")
# Audio
require_sndfile = pytest.mark.skipif(
# On Windows and OS X, soundfile installs sndfile
find_spec('''soundfile''') is None or version.parse(importlib_metadata.version('''soundfile''')) < version.parse('''0.12.0'''),
reason='''test requires sndfile>=0.12.1: \'pip install \"soundfile>=0.12.1\"\'; ''',
)
# Beam
require_beam = pytest.mark.skipif(
not config.BEAM_AVAILABLE or config.DILL_VERSION >= version.parse('''0.3.2'''),
reason='''test requires apache-beam and a compatible dill version''',
)
# Dill-cloudpickle compatibility
require_dill_gt_0_3_2 = pytest.mark.skipif(
config.DILL_VERSION <= version.parse('''0.3.2'''),
reason='''test requires dill>0.3.2 for cloudpickle compatibility''',
)
# Windows
require_not_windows = pytest.mark.skipif(
sys.platform == '''win32''',
reason='''test should not be run on Windows''',
)
def require_faiss(test_case):
    try:
        import faiss  # noqa
    except ImportError:
        test_case = unittest.skip("test requires faiss")(test_case)
    return test_case


def require_regex(test_case):
    try:
        import regex  # noqa
    except ImportError:
        test_case = unittest.skip("test requires regex")(test_case)
    return test_case


def require_elasticsearch(test_case):
    try:
        import elasticsearch  # noqa
    except ImportError:
        test_case = unittest.skip("test requires elasticsearch")(test_case)
    return test_case


def require_sqlalchemy(test_case):
    try:
        import sqlalchemy  # noqa
    except ImportError:
        test_case = unittest.skip("test requires sqlalchemy")(test_case)
    return test_case
def require_torch(test_case):
    if not config.TORCH_AVAILABLE:
        test_case = unittest.skip("test requires PyTorch")(test_case)
    return test_case


def require_tf(test_case):
    if not config.TF_AVAILABLE:
        test_case = unittest.skip("test requires TensorFlow")(test_case)
    return test_case


def require_jax(test_case):
    if not config.JAX_AVAILABLE:
        test_case = unittest.skip("test requires JAX")(test_case)
    return test_case


def require_pil(test_case):
    if not config.PIL_AVAILABLE:
        test_case = unittest.skip("test requires Pillow")(test_case)
    return test_case
def require_transformers(test_case):
    try:
        import transformers  # noqa F401
    except ImportError:
        return unittest.skip("test requires transformers")(test_case)
    else:
        return test_case


def require_tiktoken(test_case):
    try:
        import tiktoken  # noqa F401
    except ImportError:
        return unittest.skip("test requires tiktoken")(test_case)
    else:
        return test_case


def require_spacy(test_case):
    try:
        import spacy  # noqa F401
    except ImportError:
        return unittest.skip("test requires spacy")(test_case)
    else:
        return test_case


def require_spacy_model(model):
    def _require_spacy_model(test_case):
        try:
            import spacy  # noqa F401

            spacy.load(model)
        except ImportError:
            return unittest.skip("test requires spacy")(test_case)
        except OSError:
            return unittest.skip("test requires spacy model '{}'".format(model))(test_case)
        else:
            return test_case

    return _require_spacy_model
def require_pyspark(test_case):
    try:
        import pyspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires pyspark")(test_case)
    else:
        return test_case


def require_joblibspark(test_case):
    try:
        import joblibspark  # noqa F401
    except ImportError:
        return unittest.skip("test requires joblibspark")(test_case)
    else:
        return test_case
def slow(test_case):
    if not _run_slow_tests or _run_slow_tests == 0:
        test_case = unittest.skip("test is slow")(test_case)
    return test_case


def local(test_case):
    if not _run_local_tests or _run_local_tests == 0:
        test_case = unittest.skip("test is local")(test_case)
    return test_case


def packaged(test_case):
    if not _run_packaged_tests or _run_packaged_tests == 0:
        test_case = unittest.skip("test is packaged")(test_case)
    return test_case


def remote(test_case):
    if not _run_remote_tests or _run_remote_tests == 0:
        test_case = unittest.skip("test requires remote")(test_case)
    return test_case
def for_all_test_methods(*decorators):
    def decorate(cls):
        for name, fn in cls.__dict__.items():
            if callable(fn) and name.startswith("test"):
                for decorator in decorators:
                    fn = decorator(fn)
                setattr(cls, name, fn)
        return cls

    return decorate
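

# Hypothetical usage sketch: apply several skip markers to every test method of a class.
#
#     @for_all_test_methods(slow, require_faiss)
#     class MyDatasetTests(unittest.TestCase):
#         ...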
class RequestWouldHangIndefinitelyError(Exception):
    pass


class OfflineSimulationMode(Enum):
    CONNECTION_FAILS = 0
    CONNECTION_TIMES_OUT = 1
    HF_DATASETS_OFFLINE_SET_TO_1 = 2
@contextmanager
def offline(mode=OfflineSimulationMode.CONNECTION_FAILS, timeout=1e-16):
    online_request = requests.Session().request

    def timeout_request(session, method, url, **kwargs):
        # Change the url to an invalid url so that the connection hangs
        invalid_url = "https://10.255.255.1"
        if kwargs.get("timeout") is None:
            raise RequestWouldHangIndefinitelyError(
                f"Tried a call to {url} in offline mode with no timeout set. Please set a timeout."
            )
        kwargs["timeout"] = timeout
        try:
            return online_request(method, invalid_url, **kwargs)
        except Exception as e:
            # The following changes in the error are just here to make the offline timeout error prettier
            e.request.url = url
            max_retry_error = e.args[0]
            max_retry_error.args = (max_retry_error.args[0].replace("10.255.255.1", f"OfflineMock[{url}]"),)
            e.args = (max_retry_error,)
            raise

    def raise_connection_error(session, prepared_request, **kwargs):
        raise requests.ConnectionError("Offline mode is enabled.", request=prepared_request)

    if mode is OfflineSimulationMode.CONNECTION_FAILS:
        with patch("requests.Session.send", raise_connection_error):
            yield
    elif mode is OfflineSimulationMode.CONNECTION_TIMES_OUT:
        # inspired from https://stackoverflow.com/a/904609
        with patch("requests.Session.request", timeout_request):
            yield
    elif mode is OfflineSimulationMode.HF_DATASETS_OFFLINE_SET_TO_1:
        with patch("datasets.config.HF_DATASETS_OFFLINE", True):
            yield
    else:
        raise ValueError("Please use a value from the OfflineSimulationMode enum.")
@contextmanager
def set_current_working_directory_to_temp_dir(*args, **kwargs):
    original_working_dir = str(Path().resolve())
    with tempfile.TemporaryDirectory(*args, **kwargs) as tmp_dir:
        try:
            os.chdir(tmp_dir)
            yield
        finally:
            os.chdir(original_working_dir)
@contextmanager
def assert_arrow_memory_increases():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory > 0, "Arrow memory didn't increase."


@contextmanager
def assert_arrow_memory_doesnt_increase():
    import gc

    gc.collect()
    previous_allocated_memory = pa.total_allocated_bytes()
    yield
    assert pa.total_allocated_bytes() - previous_allocated_memory <= 0, "Arrow memory wasn't expected to increase."
def is_rng_equal(rng1, rng2):
    return deepcopy(rng1).integers(0, 100, 10).tolist() == deepcopy(rng2).integers(0, 100, 10).tolist()
def xfail_if_500_502(func):
    import decorator
    from requests.exceptions import HTTPError

    def _wrapper(func, *args, **kwargs):
        try:
            return func(*args, **kwargs)
        except HTTPError as err:
            if str(err).startswith("500") or str(err).startswith("502"):
                pytest.xfail(str(err))
            raise err

    return decorator.decorator(_wrapper, func)
class _RunOutput:
    def __init__(self, returncode, stdout, stderr):
        self.returncode = returncode
        self.stdout = stdout
        self.stderr = stderr
async def _read_stream(stream, callback):
    while True:
        line = await stream.readline()
        if line:
            callback(line)
        else:
            break
async def _stream_subprocess(cmd, env=None, stdin=None, timeout=None, quiet=False, echo=False) -> _RunOutput:
    if echo:
        print("\nRunning: ", " ".join(cmd))

    p = await asyncio.create_subprocess_exec(
        cmd[0],
        *cmd[1:],
        stdin=stdin,
        stdout=asyncio.subprocess.PIPE,
        stderr=asyncio.subprocess.PIPE,
        env=env,
    )

    # note: there is a warning for a possible deadlock when using `wait` with huge amounts of data in the pipe
    # https://docs.python.org/3/library/asyncio-subprocess.html#asyncio.asyncio.subprocess.Process.wait
    #
    # If it starts hanging, will need to switch to the following code. The problem is that no data
    # will be seen until it's done and if it hangs for example there will be no debug info.
    # out, err = await p.communicate()
    # return _RunOutput(p.returncode, out, err)

    out = []
    err = []

    def tee(line, sink, pipe, label=""):
        line = line.decode("utf-8").rstrip()
        sink.append(line)
        if not quiet:
            print(label, line, file=pipe)

    # XXX: the timeout doesn't seem to make any difference here
    await asyncio.wait(
        [
            _read_stream(p.stdout, lambda l: tee(l, out, sys.stdout, label="stdout:")),
            _read_stream(p.stderr, lambda l: tee(l, err, sys.stderr, label="stderr:")),
        ],
        timeout=timeout,
    )
    return _RunOutput(await p.wait(), out, err)
def execute_subprocess_async(cmd, env=None, stdin=None, timeout=180, quiet=False, echo=True) -> _RunOutput:
    loop = asyncio.get_event_loop()
    result = loop.run_until_complete(
        _stream_subprocess(cmd, env=env, stdin=stdin, timeout=timeout, quiet=quiet, echo=echo)
    )

    cmd_str = " ".join(cmd)
    if result.returncode > 0:
        stderr = "\n".join(result.stderr)
        raise RuntimeError(
            f"'{cmd_str}' failed with returncode {result.returncode}\n\n"
            f"The combined stderr from workers follows:\n{stderr}"
        )
    # check that the subprocess actually did run and produced some output, should the test rely on
    # the remote side to do the testing
    if not result.stdout and not result.stderr:
        raise RuntimeError(f"'{cmd_str}' produced no output.")
    return result
def pytest_xdist_worker_id():
    """Return the numerical id of the current `pytest-xdist` worker (0 when xdist is not used)."""
    worker = os.environ.get("PYTEST_XDIST_WORKER", "gw0")
    worker = re.sub(r"^gw", "", worker, 0, re.M)
    return int(worker)
def get_torch_dist_unique_port():
    """Return a port usable as `--master_port`, unique per `pytest-xdist` worker."""
    port = 29_500
    uniq_delta = pytest_xdist_worker_id()
    return port + uniq_delta
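

# Example: worker "gw3" gets port 29500 + 3, so concurrent torch.distributed tests
# launched by pytest-xdist don't race for the same rendezvous port.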
"""simple docstring"""
def kinetic_energy(mass: float, velocity: float) -> float:
    """Compute the kinetic energy 0.5 * m * v**2 of a body."""
    if mass < 0:
        raise ValueError("The mass of a body cannot be negative")
    return 0.5 * mass * abs(velocity) * abs(velocity)
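

# Example: kinetic_energy(mass=10, velocity=10) -> 0.5 * 10 * 10 * 10 == 500.0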
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True) | 719 |
"""simple docstring"""
from pathlib import PurePosixPath
from typing import Optional
import fsspec
from fsspec import AbstractFileSystem
from huggingface_hub.hf_api import DatasetInfo
from ..utils.file_utils import get_authentication_headers_for_url
from ..utils.hub import hf_hub_url
class HfFileSystem(AbstractFileSystem):
    """Interface to files in a Hugging Face dataset repository (legacy protocol)."""

    root_marker = ""
    protocol = "hf-legacy"  # "hf://" is reserved for hffs

    def __init__(
        self,
        repo_info: Optional[DatasetInfo] = None,
        token: Optional[str] = None,
        **kwargs,
    ):
        super().__init__(self, **kwargs)
        self.repo_info = repo_info
        self.token = token
        self.dir_cache = None
    def _get_dirs(self):
        if self.dir_cache is None:
            self.dir_cache = {}
            for hf_file in self.repo_info.siblings:
                # TODO(QL): add sizes
                self.dir_cache[hf_file.rfilename] = {
                    "name": hf_file.rfilename,
                    "size": None,
                    "type": "file",
                }
                self.dir_cache.update(
                    {
                        str(d): {"name": str(d), "size": None, "type": "directory"}
                        for d in list(PurePosixPath(hf_file.rfilename).parents)[:-1]
                    }
                )
    def _open(self, path: str, mode: str = "rb", **kwargs):
        if not isinstance(self.repo_info, DatasetInfo):
            raise NotImplementedError(f"Open is only implemented for dataset repositories, but got {self.repo_info}")
        url = hf_hub_url(self.repo_info.id, path, revision=self.repo_info.sha)
        return fsspec.open(
            url,
            mode=mode,
            headers=get_authentication_headers_for_url(url, use_auth_token=self.token),
            client_kwargs={"trust_env": True},
        ).open()
    def info(self, path, **kwargs):
        self._get_dirs()
        path = self._strip_protocol(path)
        if path in self.dir_cache:
            return self.dir_cache[path]
        else:
            raise FileNotFoundError(path)
    def ls(self, path, detail=False, **kwargs):
        self._get_dirs()
        path = PurePosixPath(path.strip("/"))
        paths = {}
        for p, f in self.dir_cache.items():
            p = PurePosixPath(p.strip("/"))
            root = p.parent
            if root == path:
                paths[str(p)] = f
        out = list(paths.values())
        if detail:
            return out
        else:
            return sorted(f["name"] for f in out)
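

# Minimal usage sketch (hypothetical `repo_info`; not part of the original module):
#
#     fs = HfFileSystem(repo_info=some_dataset_info, token=None)
#     fs.ls("")                        # top-level entries of the dataset repository
#     with fs.open("data/train.csv") as f:
#         header = f.readline()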
import gc
import unittest
from transformers import MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, FillMaskPipeline, pipeline
from transformers.pipelines import PipelineException
from transformers.testing_utils import (
is_pipeline_test,
is_torch_available,
nested_simplify,
require_tf,
require_torch,
require_torch_gpu,
slow,
)
from .test_pipelines_common import ANY
@is_pipeline_test
class FillMaskPipelineTests(unittest.TestCase):
    model_mapping = MODEL_FOR_MASKED_LM_MAPPING
    tf_model_mapping = TF_MODEL_FOR_MASKED_LM_MAPPING
    def tearDown(self):
        super().tearDown()
        # clean-up as much as possible GPU memory occupied by PyTorch
        gc.collect()
        if is_torch_available():
            import torch

            torch.cuda.empty_cache()
    @require_tf
    def test_small_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="tf")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is grouped", "score": 2.1e-05, "token": 38_015, "token_str": " grouped"},
                {"sequence": "My name is accuser", "score": 2.1e-05, "token": 25_506, "token_str": " accuser"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is grouped",
                    "score": 2.1e-05,
                    "token": 38_015,
                    "token_str": " grouped",
                },
                {
                    "sequence": "The largest city in France is accuser",
                    "score": 2.1e-05,
                    "token": 25_506,
                    "token_str": " accuser",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13_606, "token_str": " Clara"},
                {"sequence": "My name is Patrick", "score": 2e-05, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 1.9e-05, "token": 2_941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_small_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", top_k=2, framework="pt")
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Maul", "score": 2.2e-05, "token": 35_676, "token_str": " Maul"},
                {"sequence": "My name isELS", "score": 2.2e-05, "token": 16_416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {
                    "sequence": "The largest city in France is Maul",
                    "score": 2.2e-05,
                    "token": 35_676,
                    "token_str": " Maul",
                },
                {"sequence": "The largest city in France isELS", "score": 2.2e-05, "token": 16_416, "token_str": "ELS"},
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                {"sequence": "My name is Patrick", "score": 2.1e-05, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Te", "score": 2e-05, "token": 2_941, "token_str": " Te"},
                {"sequence": "My name is Clara", "score": 2e-05, "token": 13_606, "token_str": " Clara"},
            ],
        )

        outputs = unmasker("My name is <mask> <mask>", top_k=2)
        self.assertEqual(
            nested_simplify(outputs, decimals=6),
            [
                [
                    {
                        "score": 2.2e-05,
                        "token": 35_676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is Maul<mask></s>",
                    },
                    {"score": 2.2e-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name isELS<mask></s>"},
                ],
                [
                    {
                        "score": 2.2e-05,
                        "token": 35_676,
                        "token_str": " Maul",
                        "sequence": "<s>My name is<mask> Maul</s>",
                    },
                    {"score": 2.2e-05, "token": 16_416, "token_str": "ELS", "sequence": "<s>My name is<mask>ELS</s>"},
                ],
            ],
        )
    @require_torch_gpu
    def test_fp16_casting(self):
        pipe = pipeline("fill-mask", model="hf-internal-testing/tiny-random-distilbert", device=0, framework="pt")
        # convert model to fp16
        pipe.model.half()
        response = pipe("Paris is the [MASK] of France.")
        # We actually don't care about the result, we just want to make sure
        # it works, meaning the float16 tensor got casted back to float32
        # for postprocessing.
        self.assertIsInstance(response, list)
    @slow
    @require_torch
    def test_large_model_pt(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="pt")
        self.run_large_test(unmasker)

    @slow
    @require_tf
    def test_large_model_tf(self):
        unmasker = pipeline(task="fill-mask", model="distilroberta-base", top_k=2, framework="tf")
        self.run_large_test(unmasker)
    def run_large_test(self, unmasker):
        outputs = unmasker("My name is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is John", "score": 0.008, "token": 610, "token_str": " John"},
                {"sequence": "My name is Chris", "score": 0.007, "token": 1_573, "token_str": " Chris"},
            ],
        )

        outputs = unmasker("The largest city in France is <mask>")
        self.assertEqual(
            nested_simplify(outputs),
            [
                {
                    "sequence": "The largest city in France is Paris",
                    "score": 0.251,
                    "token": 2_201,
                    "token_str": " Paris",
                },
                {
                    "sequence": "The largest city in France is Lyon",
                    "score": 0.214,
                    "token": 12_790,
                    "token_str": " Lyon",
                },
            ],
        )

        outputs = unmasker("My name is <mask>", targets=[" Patrick", " Clara", " Teven"], top_k=3)
        self.assertEqual(
            nested_simplify(outputs),
            [
                {"sequence": "My name is Patrick", "score": 0.005, "token": 3_499, "token_str": " Patrick"},
                {"sequence": "My name is Clara", "score": 0.000, "token": 13_606, "token_str": " Clara"},
                {"sequence": "My name is Te", "score": 0.000, "token": 2_941, "token_str": " Te"},
            ],
        )
    @require_torch
    def test_model_no_pad_pt(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="pt")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])

    @require_tf
    def test_model_no_pad_tf(self):
        unmasker = pipeline(task="fill-mask", model="sshleifer/tiny-distilroberta-base", framework="tf")
        unmasker.tokenizer.pad_token_id = None
        unmasker.tokenizer.pad_token = None
        self.run_pipeline_test(unmasker, [])
    def get_test_pipeline(self, model, tokenizer, processor):
        if tokenizer is None or tokenizer.mask_token_id is None:
            self.skipTest("The provided tokenizer has no mask token, (probably reformer or wav2vec2)")

        fill_masker = FillMaskPipeline(model=model, tokenizer=tokenizer)
        examples = [
            f"This is another {tokenizer.mask_token} test",
        ]
        return fill_masker, examples
    def run_pipeline_test(self, fill_masker, examples):
        tokenizer = fill_masker.tokenizer
        model = fill_masker.model

        outputs = fill_masker(
            f"This is a {tokenizer.mask_token}",
        )
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}"])
        self.assertEqual(
            outputs,
            [
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
            ],
        )

        outputs = fill_masker([f"This is a {tokenizer.mask_token}", f"Another {tokenizer.mask_token} great test."])
        self.assertEqual(
            outputs,
            [
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
                [
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                    {"sequence": ANY(str), "score": ANY(float), "token": ANY(int), "token_str": ANY(str)},
                ],
            ],
        )

        with self.assertRaises(ValueError):
            fill_masker([None])
        # No mask_token is not supported
        with self.assertRaises(PipelineException):
            fill_masker("This is")

        self.run_test_top_k(model, tokenizer)
        self.run_test_targets(model, tokenizer)
        self.run_test_top_k_targets(model, tokenizer)
        self.fill_mask_with_duplicate_targets_and_top_k(model, tokenizer)
        self.fill_mask_with_multiple_masks(model, tokenizer)
def _snake_case ( self : str , __UpperCamelCase : Any , __UpperCamelCase : Optional[int] ) ->str:
'''simple docstring'''
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = sorted(vocab.keys() )[:2]
# Pipeline argument
_UpperCAmelCase = FillMaskPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase , targets=__UpperCamelCase )
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__UpperCamelCase , [
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __UpperCamelCase )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCamelCase ) )
# Call argument
_UpperCAmelCase = FillMaskPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase )
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__UpperCamelCase )
self.assertEqual(
__UpperCamelCase , [
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
] , )
_UpperCAmelCase = {vocab[el] for el in targets}
self.assertEqual({el["""token"""] for el in outputs} , __UpperCamelCase )
_UpperCAmelCase = [tokenizer.decode([x] ) for x in target_ids]
self.assertEqual({el["""token_str"""] for el in outputs} , set(__UpperCamelCase ) )
# Score equivalence
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__UpperCamelCase )
_UpperCAmelCase = [top_mask["""token_str"""] for top_mask in outputs]
_UpperCAmelCase = [top_mask["""score"""] for top_mask in outputs]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCamelCase ) == set(__UpperCamelCase ):
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=__UpperCamelCase )
_UpperCAmelCase = [top_mask["""score"""] for top_mask in unmasked_targets]
self.assertEqual(nested_simplify(__UpperCamelCase ) , nested_simplify(__UpperCamelCase ) )
        # Raises with invalid targets
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[] )
        # For some tokenizers, `""` is actually in the vocabulary and the expected error won't be raised
if "" not in tokenizer.get_vocab():
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets=[""""""] )
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , targets="""""" )
def _snake_case ( self : Any , __UpperCamelCase : Optional[Any] , __UpperCamelCase : int ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = FillMaskPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase , top_k=2 )
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" )
self.assertEqual(
__UpperCamelCase , [
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
] , )
_UpperCAmelCase = FillMaskPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase )
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
] , )
self.assertEqual(nested_simplify(__UpperCamelCase ) , nested_simplify(__UpperCamelCase ) )
def _snake_case ( self : str , __UpperCamelCase : List[str] , __UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = tokenizer.get_vocab()
_UpperCAmelCase = FillMaskPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase )
# top_k=2, ntargets=3
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=2 , targets=__UpperCamelCase )
        # If we use the most probable targets, and filter differently, we should still
# have the same results
        _UpperCAmelCase = [el["""token_str"""] for el in sorted(__UpperCamelCase , key=lambda x : x["score"] , reverse=__UpperCamelCase )]
# For some BPE tokenizers, `</w>` is removed during decoding, so `token_str` won't be the same as in `targets`.
if set(__UpperCamelCase ).issubset(__UpperCamelCase ):
_UpperCAmelCase = fill_masker(f"""This is a {tokenizer.mask_token}""" , top_k=3 , targets=__UpperCamelCase )
# They should yield exactly the same result
self.assertEqual(nested_simplify(__UpperCamelCase ) , nested_simplify(__UpperCamelCase ) )
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase = FillMaskPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase )
_UpperCAmelCase = tokenizer.get_vocab()
# String duplicates + id duplicates
_UpperCAmelCase = sorted(vocab.keys() )[:3]
_UpperCAmelCase = [targets[0], targets[1], targets[0], targets[2], targets[1]]
_UpperCAmelCase = fill_masker(f"""My name is {tokenizer.mask_token}""" , targets=__UpperCamelCase , top_k=10 )
        # The target list contains duplicates, so we can't output more
        # unique predictions than there are unique targets
self.assertEqual(len(__UpperCamelCase ) , 3 )
def _snake_case ( self : str , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = FillMaskPipeline(model=__UpperCamelCase , tokenizer=__UpperCamelCase )
_UpperCAmelCase = fill_masker(
f"""This is a {tokenizer.mask_token} {tokenizer.mask_token} {tokenizer.mask_token}""" , top_k=2 )
self.assertEqual(
__UpperCamelCase , [
[
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
],
[
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
],
[
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
{"""sequence""": ANY(__UpperCamelCase ), """score""": ANY(__UpperCamelCase ), """token""": ANY(__UpperCamelCase ), """token_str""": ANY(__UpperCamelCase )},
],
] , ) | 720 |
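A minimal usage sketch of the pipeline exercised by these assertions; the checkpoint name is an assumption, not taken from the tests.
# Hedged sketch: any small fill-mask checkpoint works here;
# "sshleifer/tiny-distilroberta-base" is only an illustrative choice
# (its mask token is "<mask>").
from transformers import pipeline
fill_masker = pipeline("fill-mask", model="sshleifer/tiny-distilroberta-base")
outputs = fill_masker("This is a <mask>.", top_k=2)
# Each prediction is a dict with "sequence", "score", "token" and "token_str",
# the exact shape the assertions above check for.
print(outputs)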
"""simple docstring"""
import itertools
import os
from collections import Counter, defaultdict
from concurrent.futures import ThreadPoolExecutor, as_completed
import numpy as np
import datasets
from .execute import check_correctness
a : Optional[Any] = '''\
@misc{chen2021evaluating,
title={Evaluating Large Language Models Trained on Code},
author={Mark Chen and Jerry Tworek and Heewoo Jun and Qiming Yuan \
and Henrique Ponde de Oliveira Pinto and Jared Kaplan and Harri Edwards \
and Yuri Burda and Nicholas Joseph and Greg Brockman and Alex Ray \
and Raul Puri and Gretchen Krueger and Michael Petrov and Heidy Khlaaf \
and Girish Sastry and Pamela Mishkin and Brooke Chan and Scott Gray \
and Nick Ryder and Mikhail Pavlov and Alethea Power and Lukasz Kaiser \
and Mohammad Bavarian and Clemens Winter and Philippe Tillet \
and Felipe Petroski Such and Dave Cummings and Matthias Plappert \
and Fotios Chantzis and Elizabeth Barnes and Ariel Herbert-Voss \
and William Hebgen Guss and Alex Nichol and Alex Paino and Nikolas Tezak \
and Jie Tang and Igor Babuschkin and Suchir Balaji and Shantanu Jain \
and William Saunders and Christopher Hesse and Andrew N. Carr \
and Jan Leike and Josh Achiam and Vedant Misra and Evan Morikawa \
and Alec Radford and Matthew Knight and Miles Brundage and Mira Murati \
and Katie Mayer and Peter Welinder and Bob McGrew and Dario Amodei \
and Sam McCandlish and Ilya Sutskever and Wojciech Zaremba},
year={2021},
eprint={2107.03374},
archivePrefix={arXiv},
primaryClass={cs.LG}
}
'''
a : List[str] = '''\
This metric implements the evaluation harness for the HumanEval problem solving dataset
described in the paper "Evaluating Large Language Models Trained on Code"
(https://arxiv.org/abs/2107.03374).
'''
a : Any = '''
Calculates how good predictions are given some references, using certain scores
Args:
    predictions: list of candidates to evaluate. Each candidate should be a list
of strings with several code candidates to solve the problem.
references: a list with a test for each prediction. Each test should evaluate the
correctness of a code candidate.
k: number of code candidates to consider in the evaluation (Default: [1, 10, 100])
    num_workers: number of workers used to evaluate the candidate programs (Default: 4).
    timeout: maximum time in seconds allowed for each candidate program to run (Default: 3.0).
Returns:
pass_at_k: dict with pass rates for each k
results: dict with granular results of each unittest
Examples:
>>> code_eval = datasets.load_metric("code_eval")
>>> test_cases = ["assert add(2,3)==5"]
>>> candidates = [["def add(a,b): return a*b", "def add(a, b): return a+b"]]
>>> pass_at_k, results = code_eval.compute(references=test_cases, predictions=candidates, k=[1, 2])
>>> print(pass_at_k)
{\'pass@1\': 0.5, \'pass@2\': 1.0}
'''
a : int = '''
################################################################################
!!!WARNING!!!
################################################################################
The "code_eval" metric executes untrusted model-generated code in Python.
Although it is highly unlikely that model-generated code will do something
overtly malicious in response to this test suite, model-generated code may act
destructively due to a lack of model capability or alignment.
Users are strongly encouraged to sandbox this evaluation suite so that it
does not perform destructive actions on their host or network. For more
information on how OpenAI sandboxes its code, see the paper "Evaluating Large
Language Models Trained on Code" (https://arxiv.org/abs/2107.03374).
Once you have read this disclaimer and taken appropriate precautions,
set the environment variable HF_ALLOW_CODE_EVAL="1". Within Python you can do this
with:
>>> import os
>>> os.environ["HF_ALLOW_CODE_EVAL"] = "1"
################################################################################\
'''
a : List[Any] = '''The MIT License
Copyright (c) OpenAI (https://openai.com)
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class a_ ( datasets.Metric ):
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
return datasets.MetricInfo(
# This is the description that will appear on the metrics page.
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
"""predictions""": datasets.Sequence(datasets.Value("""string""" ) ),
"""references""": datasets.Value("""string""" ),
} ) , homepage="""https://github.com/openai/human-eval""" , codebase_urls=["""https://github.com/openai/human-eval"""] , reference_urls=["""https://github.com/openai/human-eval"""] , license=_LICENSE , )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any]=[1, 10, 1_00] , __UpperCamelCase : Dict=4 , __UpperCamelCase : Tuple=3.0 ) ->Union[str, Any]:
'''simple docstring'''
if os.getenv("""HF_ALLOW_CODE_EVAL""" , 0 ) != "1":
raise ValueError(_WARNING )
if os.name == "nt":
raise NotImplementedError("""This metric is currently not supported on Windows.""" )
with ThreadPoolExecutor(max_workers=__UpperCamelCase ) as executor:
_UpperCAmelCase = []
_UpperCAmelCase = Counter()
_UpperCAmelCase = 0
_UpperCAmelCase = defaultdict(__UpperCamelCase )
for task_id, (candidates, test_case) in enumerate(zip(__UpperCamelCase , __UpperCamelCase ) ):
for candidate in candidates:
_UpperCAmelCase = candidate + """\n""" + test_case
_UpperCAmelCase = (test_program, timeout, task_id, completion_id[task_id])
_UpperCAmelCase = executor.submit(__UpperCamelCase , *__UpperCamelCase )
futures.append(__UpperCamelCase )
completion_id[task_id] += 1
n_samples += 1
for future in as_completed(__UpperCamelCase ):
_UpperCAmelCase = future.result()
results[result["task_id"]].append((result["""completion_id"""], result) )
_UpperCAmelCase ,_UpperCAmelCase = [], []
for result in results.values():
result.sort()
_UpperCAmelCase = [r[1]["""passed"""] for r in result]
total.append(len(__UpperCamelCase ) )
correct.append(sum(__UpperCamelCase ) )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = np.array(__UpperCamelCase )
_UpperCAmelCase = k
_UpperCAmelCase = {f"""pass@{k}""": estimate_pass_at_k(__UpperCamelCase , __UpperCamelCase , __UpperCamelCase ).mean() for k in ks if (total >= k).all()}
return pass_at_k, results
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
def estimator(_A , _A , _A ) -> float:
if n - c < k:
return 1.0
return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1 , n + 1 ) )
if isinstance(_A , _A ):
_UpperCAmelCase = itertools.repeat(_A , len(_A ) )
else:
assert len(_A ) == len(_A )
_UpperCAmelCase = iter(_A )
return np.array([estimator(int(_A ) , int(_A ) , _A ) for n, c in zip(_A , _A )] ) | 19 | 0 |
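A quick numeric sanity check of the unbiased pass@k estimator implemented above, restated standalone since the dump obfuscates the function name.
import numpy as np
def pass_at_k(n: int, c: int, k: int) -> float:
    # n generated samples, c of them correct: probability that at least one
    # of k drawn samples passes, i.e. 1 - C(n-c, k) / C(n, k).
    if n - c < k:
        return 1.0
    return 1.0 - np.prod(1.0 - k / np.arange(n - c + 1, n + 1))
# Matches the docstring example above: 2 candidates, 1 correct.
assert pass_at_k(2, 1, 1) == 0.5
assert pass_at_k(2, 1, 2) == 1.0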
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import GLPNImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self : Optional[int] , __UpperCamelCase : List[str] , __UpperCamelCase : Optional[int]=7 , __UpperCamelCase : List[str]=3 , __UpperCamelCase : Dict=18 , __UpperCamelCase : List[str]=30 , __UpperCamelCase : List[Any]=4_00 , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : int=32 , __UpperCamelCase : Dict=True , ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size_divisor
_UpperCAmelCase = do_rescale
def _snake_case ( self : Union[str, Any] ) ->str:
'''simple docstring'''
return {
"do_resize": self.do_resize,
"size_divisor": self.size_divisor,
"do_rescale": self.do_rescale,
}
@require_torch
@require_vision
class a_ ( snake_case__ , unittest.TestCase ):
a : List[Any] = GLPNImageProcessor if is_vision_available() else None
def _snake_case ( self : Optional[Any] ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = GLPNImageProcessingTester(self )
@property
def _snake_case ( self : Dict ) ->Dict:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self : Dict ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_resize""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """size_divisor""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """resample""" ) )
self.assertTrue(hasattr(_SCREAMING_SNAKE_CASE , """do_rescale""" ) )
def _snake_case ( self : Optional[int] ) ->str:
'''simple docstring'''
pass
def _snake_case ( self : Union[str, Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , Image.Image )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , numpify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , np.ndarray )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 )
def _snake_case ( self : Optional[int] ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=_SCREAMING_SNAKE_CASE , torchify=_SCREAMING_SNAKE_CASE )
for image in image_inputs:
self.assertIsInstance(_SCREAMING_SNAKE_CASE , torch.Tensor )
# Test not batched input (GLPNImageProcessor doesn't support batching)
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertTrue(encoded_images.shape[-1] % self.image_processor_tester.size_divisor == 0 )
self.assertTrue(encoded_images.shape[-2] % self.image_processor_tester.size_divisor == 0 ) | 721 |
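A small sketch of the rounding these divisibility assertions rely on; the floor rule is an assumption on my part, the tests only require that each side is a multiple of size_divisor.
def snap_to_multiple(dim: int, divisor: int = 32) -> int:
    # One plausible rule: floor each side down to the nearest multiple.
    return (dim // divisor) * divisor
assert snap_to_multiple(400) == 384 and snap_to_multiple(400) % 32 == 0
assert snap_to_multiple(224) == 224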
"""simple docstring"""
from collections.abc import Callable
import numpy as np
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> np.array:
"""simple docstring"""
_UpperCAmelCase = int(np.ceil((x_end - xa) / step_size ) )
_UpperCAmelCase = np.zeros((n + 1,) )
_UpperCAmelCase = ya
_UpperCAmelCase = xa
for k in range(_A ):
_UpperCAmelCase = y[k] + step_size * ode_func(_A , y[k] )
_UpperCAmelCase = y[k] + (
(step_size / 2) * (ode_func(_A , y[k] ) + ode_func(x + step_size , _A ))
)
x += step_size
return y
if __name__ == "__main__":
import doctest
doctest.testmod() | 19 | 0 |
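A hedged usage sketch of the Heun (explicit trapezoidal) integrator above, restated standalone because the dump obfuscates the function name.
import numpy as np
def heun(f, x0, y0, x_end, h):
    n = int(np.ceil((x_end - x0) / h))
    y = np.zeros(n + 1)
    y[0], x = y0, x0
    for k in range(n):
        pred = y[k] + h * f(x, y[k])  # Euler predictor
        y[k + 1] = y[k] + (h / 2) * (f(x, y[k]) + f(x + h, pred))  # trapezoidal corrector
        x += h
    return y
# y' = y with y(0) = 1 on [0, 1]; the endpoint should approximate e.
approx = heun(lambda x, y: y, 0.0, 1.0, 1.0, 0.001)[-1]
assert abs(approx - np.e) < 1e-4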
"""simple docstring"""
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
a : str = logging.get_logger(__name__)
a : Dict = {
'''Salesforce/instruct-blip-flan-t5''': '''https://huggingface.co/Salesforce/instruct-blip-flan-t5/resolve/main/config.json''',
}
class a_ ( _UpperCAmelCase ):
a : Dict = 'instructblip_vision_model'
def __init__( self : Any , __UpperCamelCase : Any=14_08 , __UpperCamelCase : Optional[int]=61_44 , __UpperCamelCase : List[str]=39 , __UpperCamelCase : Union[str, Any]=16 , __UpperCamelCase : int=2_24 , __UpperCamelCase : int=14 , __UpperCamelCase : List[Any]="gelu" , __UpperCamelCase : Any=1e-6 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Union[str, Any]=1e-10 , __UpperCamelCase : Dict=True , **__UpperCamelCase : Union[str, Any] , ) ->int:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = hidden_size
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = patch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = initializer_range
_UpperCAmelCase = attention_dropout
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = hidden_act
_UpperCAmelCase = qkv_bias
@classmethod
def _snake_case ( cls : Dict , __UpperCamelCase : Union[str, os.PathLike] , **__UpperCamelCase : str ) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the vision config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_UpperCAmelCase = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class a_ ( _UpperCAmelCase ):
a : List[str] = 'instructblip_qformer'
def __init__( self : List[Any] , __UpperCamelCase : List[str]=3_05_22 , __UpperCamelCase : Optional[Any]=7_68 , __UpperCamelCase : Optional[int]=12 , __UpperCamelCase : Any=12 , __UpperCamelCase : Union[str, Any]=30_72 , __UpperCamelCase : Optional[Any]="gelu" , __UpperCamelCase : str=0.1 , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : List[Any]=5_12 , __UpperCamelCase : List[Any]=0.0_2 , __UpperCamelCase : Tuple=1e-12 , __UpperCamelCase : Union[str, Any]=0 , __UpperCamelCase : Optional[int]="absolute" , __UpperCamelCase : Dict=2 , __UpperCamelCase : Any=14_08 , **__UpperCamelCase : str , ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(pad_token_id=__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = vocab_size
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = hidden_act
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = max_position_embeddings
_UpperCAmelCase = initializer_range
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = position_embedding_type
_UpperCAmelCase = cross_attention_frequency
_UpperCAmelCase = encoder_hidden_size
@classmethod
def _snake_case ( cls : Tuple , __UpperCamelCase : Union[str, os.PathLike] , **__UpperCamelCase : List[Any] ) ->"PretrainedConfig":
'''simple docstring'''
cls._set_token_in_kwargs(__UpperCamelCase )
_UpperCAmelCase ,_UpperCAmelCase = cls.get_config_dict(__UpperCamelCase , **__UpperCamelCase )
# get the qformer config dict if we are loading from InstructBlipConfig
if config_dict.get("""model_type""" ) == "instructblip":
_UpperCAmelCase = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls , """model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f"""You are using a model of type {config_dict["model_type"]} to instantiate a model of type """
f"""{cls.model_type}. This is not supported for all configurations of models and can yield errors.""" )
return cls.from_dict(__UpperCamelCase , **__UpperCamelCase )
class a_ ( _UpperCAmelCase ):
a : List[str] = 'instructblip'
a : Any = True
def __init__( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[Any]=32 , **__UpperCamelCase : List[str] ) ->Union[str, Any]:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
if vision_config is None:
_UpperCAmelCase = {}
logger.info("""vision_config is None. initializing the InstructBlipVisionConfig with default values.""" )
if qformer_config is None:
_UpperCAmelCase = {}
logger.info("""qformer_config is None. Initializing the InstructBlipQFormerConfig with default values.""" )
if text_config is None:
_UpperCAmelCase = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
_UpperCAmelCase = InstructBlipVisionConfig(**__UpperCamelCase )
_UpperCAmelCase = InstructBlipQFormerConfig(**__UpperCamelCase )
_UpperCAmelCase = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
_UpperCAmelCase = CONFIG_MAPPING[text_model_type](**__UpperCamelCase )
_UpperCAmelCase = self.text_config.tie_word_embeddings
_UpperCAmelCase = self.text_config.is_encoder_decoder
_UpperCAmelCase = num_query_tokens
_UpperCAmelCase = self.vision_config.hidden_size
_UpperCAmelCase = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
_UpperCAmelCase = 1.0
_UpperCAmelCase = 0.0_2
@classmethod
def _snake_case ( cls : Optional[Any] , __UpperCamelCase : InstructBlipVisionConfig , __UpperCamelCase : InstructBlipQFormerConfig , __UpperCamelCase : PretrainedConfig , **__UpperCamelCase : int , ) ->Dict:
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() , qformer_config=qformer_config.to_dict() , text_config=text_config.to_dict() , **__UpperCamelCase , )
def _snake_case ( self : Dict ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = copy.deepcopy(self.__dict__ )
_UpperCAmelCase = self.vision_config.to_dict()
_UpperCAmelCase = self.qformer_config.to_dict()
_UpperCAmelCase = self.text_config.to_dict()
_UpperCAmelCase = self.__class__.model_type
return output | 700 |
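A hedged sketch of composing the composite config defined above from its three sub-configs; the classmethod name follows the public transformers API (the dump obfuscates it) and the hyperparameter values are illustrative.
from transformers import (
    InstructBlipConfig,
    InstructBlipQFormerConfig,
    InstructBlipVisionConfig,
    OPTConfig,
)
vision = InstructBlipVisionConfig(hidden_size=1408)
qformer = InstructBlipQFormerConfig(encoder_hidden_size=1408)
text = OPTConfig()
config = InstructBlipConfig.from_vision_qformer_text_configs(vision, qformer, text)
# A text config without an explicit model_type defaults to OPT, per __init__ above.
assert config.text_config.model_type == "opt"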
"""simple docstring"""
import argparse
import logging
import os
import sys
import numpy as np
import onnxruntime
import torch
from bart_onnx.generation_onnx import BARTBeamSearchGenerator
from bart_onnx.reduce_onnx_size import remove_dup_initializers
import transformers
from transformers import BartForConditionalGeneration, BartTokenizer
logging.basicConfig(
format='''%(asctime)s | %(levelname)s | %(name)s | [%(filename)s:%(lineno)d] %(message)s''',
datefmt='''%Y-%m-%d %H:%M:%S''',
level=os.environ.get('''LOGLEVEL''', '''INFO''').upper(),
stream=sys.stdout,
)
a : List[str] = logging.getLogger(__name__)
a : int = {'''facebook/bart-base''': BartForConditionalGeneration}
a : Dict = {'''facebook/bart-base''': BartTokenizer}
def _UpperCamelCase ( ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = argparse.ArgumentParser(description="""Export Bart model + Beam Search to ONNX graph.""" )
parser.add_argument(
"""--validation_file""" , type=_A , default=_A , help="""A csv or a json file containing the validation data.""" )
parser.add_argument(
"""--max_length""" , type=_A , default=5 , help="""The maximum total input sequence length after tokenization.""" , )
parser.add_argument(
"""--num_beams""" , type=_A , default=_A , help=(
"""Number of beams to use for evaluation. This argument will be """
"""passed to ``model.generate``, which is used during ``evaluate`` and ``predict``."""
) , )
parser.add_argument(
"""--model_name_or_path""" , type=_A , help="""Path to pretrained model or model identifier from huggingface.co/models.""" , required=_A , )
parser.add_argument(
"""--config_name""" , type=_A , default=_A , help="""Pretrained config name or path if not the same as model_name""" , )
parser.add_argument(
"""--device""" , type=_A , default="""cpu""" , help="""Device where the model will be run""" , )
parser.add_argument("""--output_file_path""" , type=_A , default=_A , help="""Where to store the final ONNX file.""" )
_UpperCAmelCase = parser.parse_args()
return args
def _UpperCamelCase ( _A , _A="cpu" ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = model_dict[model_name].from_pretrained(_A ).to(_A )
_UpperCAmelCase = tokenizer_dict[model_name].from_pretrained(_A )
if model_name in ["facebook/bart-base"]:
_UpperCAmelCase = 0
_UpperCAmelCase = None
_UpperCAmelCase = 0
return huggingface_model, tokenizer
def _UpperCamelCase ( _A , _A , _A , _A , _A ) -> Optional[int]:
"""simple docstring"""
model.eval()
_UpperCAmelCase = None
_UpperCAmelCase = torch.jit.script(BARTBeamSearchGenerator(_A ) )
with torch.no_grad():
_UpperCAmelCase = """My friends are cool but they eat too many carbs."""
_UpperCAmelCase = tokenizer([ARTICLE_TO_SUMMARIZE] , max_length=1_0_2_4 , return_tensors="""pt""" ).to(model.device )
_UpperCAmelCase = model.generate(
inputs["""input_ids"""] , attention_mask=inputs["""attention_mask"""] , num_beams=_A , max_length=_A , early_stopping=_A , decoder_start_token_id=model.config.decoder_start_token_id , )
torch.onnx.export(
_A , (
inputs["""input_ids"""],
inputs["""attention_mask"""],
num_beams,
max_length,
model.config.decoder_start_token_id,
) , _A , opset_version=1_4 , input_names=["""input_ids""", """attention_mask""", """num_beams""", """max_length""", """decoder_start_token_id"""] , output_names=["""output_ids"""] , dynamic_axes={
"""input_ids""": {0: """batch""", 1: """seq"""},
"""output_ids""": {0: """batch""", 1: """seq_out"""},
} , example_outputs=_A , )
logger.info("""Model exported to {}""".format(_A ) )
_UpperCAmelCase = remove_dup_initializers(os.path.abspath(_A ) )
logger.info("""Deduplicated and optimized model written to {}""".format(_A ) )
_UpperCAmelCase = onnxruntime.InferenceSession(_A )
_UpperCAmelCase = ort_sess.run(
_A , {
"""input_ids""": inputs["""input_ids"""].cpu().numpy(),
"""attention_mask""": inputs["""attention_mask"""].cpu().numpy(),
"""num_beams""": np.array(_A ),
"""max_length""": np.array(_A ),
"""decoder_start_token_id""": np.array(model.config.decoder_start_token_id ),
} , )
np.testing.assert_allclose(summary_ids.cpu().numpy() , ort_out[0] , rtol=1e-3 , atol=1e-3 )
logger.info("""Model outputs from torch and ONNX Runtime are similar.""" )
logger.info("""Success.""" )
def _UpperCamelCase ( ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = parse_args()
_UpperCAmelCase = 5
_UpperCAmelCase = 4
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format="""%(asctime)s - %(levelname)s - %(name)s - %(message)s""" , datefmt="""%m/%d/%Y %H:%M:%S""" , level=logging.INFO , )
logger.setLevel(logging.INFO )
transformers.utils.logging.set_verbosity_error()
_UpperCAmelCase = torch.device(args.device )
_UpperCAmelCase ,_UpperCAmelCase = load_model_tokenizer(args.model_name_or_path , _A )
if model.config.decoder_start_token_id is None:
raise ValueError("""Make sure that `config.decoder_start_token_id` is correctly defined""" )
model.to(_A )
if args.max_length:
_UpperCAmelCase = args.max_length
if args.num_beams:
_UpperCAmelCase = args.num_beams
if args.output_file_path:
_UpperCAmelCase = args.output_file_path
else:
_UpperCAmelCase = """BART.onnx"""
logger.info("""Exporting model to ONNX""" )
export_and_validate_model(_A , _A , _A , _A , _A )
if __name__ == "__main__":
main() | 19 | 0 |
"""simple docstring"""
from ...configuration_utils import PretrainedConfig
from ...utils import logging
from ...utils.backbone_utils import BackboneConfigMixin, get_aligned_output_features_output_indices
a : Union[str, Any] = logging.get_logger(__name__)
a : List[Any] = {
'''shi-labs/nat-mini-in1k-224''': '''https://huggingface.co/shi-labs/nat-mini-in1k-224/resolve/main/config.json''',
# See all Nat models at https://huggingface.co/models?filter=nat
}
class a_ ( _UpperCAmelCase , _UpperCAmelCase ):
a : List[str] = 'nat'
a : List[Any] = {
'num_attention_heads': 'num_heads',
'num_hidden_layers': 'num_layers',
}
def __init__( self : str , __UpperCamelCase : Union[str, Any]=4 , __UpperCamelCase : Union[str, Any]=3 , __UpperCamelCase : List[str]=64 , __UpperCamelCase : Tuple=[3, 4, 6, 5] , __UpperCamelCase : Dict=[2, 4, 8, 16] , __UpperCamelCase : Any=7 , __UpperCamelCase : List[str]=3.0 , __UpperCamelCase : int=True , __UpperCamelCase : Optional[Any]=0.0 , __UpperCamelCase : Tuple=0.0 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Tuple="gelu" , __UpperCamelCase : Dict=0.0_2 , __UpperCamelCase : Any=1e-5 , __UpperCamelCase : int=0.0 , __UpperCamelCase : Optional[int]=None , __UpperCamelCase : Union[str, Any]=None , **__UpperCamelCase : List[Any] , ) ->str:
'''simple docstring'''
super().__init__(**__UpperCamelCase )
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = embed_dim
_UpperCAmelCase = depths
_UpperCAmelCase = len(__UpperCamelCase )
_UpperCAmelCase = num_heads
_UpperCAmelCase = kernel_size
_UpperCAmelCase = mlp_ratio
_UpperCAmelCase = qkv_bias
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = drop_path_rate
_UpperCAmelCase = hidden_act
_UpperCAmelCase = layer_norm_eps
_UpperCAmelCase = initializer_range
# we set the hidden_size attribute in order to make Nat work with VisionEncoderDecoderModel
# this indicates the channel dimension after the last stage of the model
_UpperCAmelCase = int(embed_dim * 2 ** (len(__UpperCamelCase ) - 1) )
_UpperCAmelCase = layer_scale_init_value
_UpperCAmelCase = ["""stem"""] + [f"""stage{idx}""" for idx in range(1 , len(__UpperCamelCase ) + 1 )]
_UpperCAmelCase ,_UpperCAmelCase = get_aligned_output_features_output_indices(
out_features=__UpperCamelCase , out_indices=__UpperCamelCase , stage_names=self.stage_names ) | 701 |
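A worked check of the hidden_size arithmetic above: with embed_dim=64 and the default four stages, the channel dimension doubles at each of the three downsampling steps.
embed_dim, num_stages = 64, 4
assert int(embed_dim * 2 ** (num_stages - 1)) == 512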
"""simple docstring"""
import argparse
import json
import math
import os
import time
import traceback
import zipfile
from collections import Counter
import requests
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{workflow_run_id}/jobs?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
job_links.update({job["""name"""]: job["""html_url"""] for job in result["""jobs"""]} )
return job_links
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = F"""https://api.github.com/repos/huggingface/transformers/actions/runs/{worflow_run_id}/artifacts?per_page=100"""
_UpperCAmelCase = requests.get(_A , headers=_A ).json()
_UpperCAmelCase = {}
try:
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
_UpperCAmelCase = math.ceil((result["""total_count"""] - 1_0_0) / 1_0_0 )
for i in range(_A ):
_UpperCAmelCase = requests.get(url + F"""&page={i + 2}""" , headers=_A ).json()
artifacts.update({artifact["""name"""]: artifact["""archive_download_url"""] for artifact in result["""artifacts"""]} )
return artifacts
except Exception:
print(F"""Unknown error, could not fetch links:\n{traceback.format_exc()}""" )
return {}
def _UpperCamelCase ( _A , _A , _A , _A ) -> List[str]:
"""simple docstring"""
_UpperCAmelCase = None
if token is not None:
_UpperCAmelCase = {"""Accept""": """application/vnd.github+json""", """Authorization""": F"""Bearer {token}"""}
_UpperCAmelCase = requests.get(_A , headers=_A , allow_redirects=_A )
_UpperCAmelCase = result.headers["""Location"""]
_UpperCAmelCase = requests.get(_A , allow_redirects=_A )
_UpperCAmelCase = os.path.join(_A , F"""{artifact_name}.zip""" )
with open(_A , """wb""" ) as fp:
fp.write(response.content )
def _UpperCamelCase ( _A , _A=None ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = None
with zipfile.ZipFile(_A ) as z:
for filename in z.namelist():
if not os.path.isdir(_A ):
# read the file
if filename in ["failures_line.txt", "summary_short.txt", "job_name.txt"]:
with z.open(_A ) as f:
for line in f:
_UpperCAmelCase = line.decode("""UTF-8""" ).strip()
if filename == "failures_line.txt":
try:
# `error_line` is the place where `error` occurs
_UpperCAmelCase = line[: line.index(""": """ )]
_UpperCAmelCase = line[line.index(""": """ ) + len(""": """ ) :]
errors.append([error_line, error] )
except Exception:
# skip un-related lines
pass
elif filename == "summary_short.txt" and line.startswith("""FAILED """ ):
# `test` is the test method that failed
_UpperCAmelCase = line[len("""FAILED """ ) :]
failed_tests.append(_A )
elif filename == "job_name.txt":
_UpperCAmelCase = line
if len(_A ) != len(_A ):
raise ValueError(
F"""`errors` and `failed_tests` should have the same number of elements. Got {len(_A )} for `errors` """
F"""and {len(_A )} for `failed_tests` instead. The test reports in {artifact_zip_path} have some"""
""" problem.""" )
_UpperCAmelCase = None
if job_name and job_links:
_UpperCAmelCase = job_links.get(_A , _A )
# A list with elements of the form (line of error, error, failed test)
_UpperCAmelCase = [x + [y] + [job_link] for x, y in zip(_A , _A )]
return result
def _UpperCamelCase ( _A , _A=None ) -> Union[str, Any]:
"""simple docstring"""
_UpperCAmelCase = []
_UpperCAmelCase = [os.path.join(_A , _A ) for p in os.listdir(_A ) if p.endswith(""".zip""" )]
for p in paths:
errors.extend(get_errors_from_single_artifact(_A , job_links=_A ) )
return errors
def _UpperCamelCase ( _A , _A=None ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = Counter()
counter.update([x[1] for x in logs] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {}
for error, count in counts:
if error_filter is None or error not in error_filter:
_UpperCAmelCase = {"""count""": count, """failed_tests""": [(x[2], x[0]) for x in logs if x[1] == error]}
    _UpperCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Dict:
"""simple docstring"""
_UpperCAmelCase = test.split("""::""" )[0]
if test.startswith("""tests/models/""" ):
_UpperCAmelCase = test.split("""/""" )[2]
else:
_UpperCAmelCase = None
return test
def _UpperCamelCase ( _A , _A=None ) -> Any:
"""simple docstring"""
_UpperCAmelCase = [(x[0], x[1], get_model(x[2] )) for x in logs]
_UpperCAmelCase = [x for x in logs if x[2] is not None]
_UpperCAmelCase = {x[2] for x in logs}
_UpperCAmelCase = {}
for test in tests:
_UpperCAmelCase = Counter()
# count by errors in `test`
counter.update([x[1] for x in logs if x[2] == test] )
_UpperCAmelCase = counter.most_common()
_UpperCAmelCase = {error: count for error, count in counts if (error_filter is None or error not in error_filter)}
_UpperCAmelCase = sum(error_counts.values() )
if n_errors > 0:
_UpperCAmelCase = {"""count""": n_errors, """errors""": error_counts}
    _UpperCAmelCase = dict(sorted(r.items() , key=lambda item : item[1]["count"] , reverse=_A ) )
return r
def _UpperCamelCase ( _A ) -> Optional[int]:
"""simple docstring"""
_UpperCAmelCase = """| no. | error | status |"""
_UpperCAmelCase = """|-:|:-|:-|"""
_UpperCAmelCase = [header, sep]
for error in reduced_by_error:
_UpperCAmelCase = reduced_by_error[error]["""count"""]
_UpperCAmelCase = F"""| {count} | {error[:1_0_0]} | |"""
lines.append(_A )
return "\n".join(_A )
def _UpperCamelCase ( _A ) -> Optional[Any]:
"""simple docstring"""
_UpperCAmelCase = """| model | no. of errors | major error | count |"""
_UpperCAmelCase = """|-:|-:|-:|-:|"""
_UpperCAmelCase = [header, sep]
for model in reduced_by_model:
_UpperCAmelCase = reduced_by_model[model]["""count"""]
_UpperCAmelCase ,_UpperCAmelCase = list(reduced_by_model[model]["""errors"""].items() )[0]
_UpperCAmelCase = F"""| {model} | {count} | {error[:6_0]} | {_count} |"""
lines.append(_A )
return "\n".join(_A )
if __name__ == "__main__":
a : Tuple = argparse.ArgumentParser()
# Required parameters
parser.add_argument('''--workflow_run_id''', type=str, required=True, help='''A GitHub Actions workflow run id.''')
parser.add_argument(
'''--output_dir''',
type=str,
required=True,
help='''Where to store the downloaded artifacts and other result files.''',
)
parser.add_argument('''--token''', default=None, type=str, help='''A token that has actions:read permission.''')
a : Dict = parser.parse_args()
os.makedirs(args.output_dir, exist_ok=True)
a : Tuple = get_job_links(args.workflow_run_id, token=args.token)
a : Tuple = {}
# To deal with `workflow_call` event, where a job name is the combination of the job names in the caller and callee.
# For example, `PyTorch 1.11 / Model tests (models/albert, single-gpu)`.
if _job_links:
for k, v in _job_links.items():
# This is how GitHub actions combine job names.
if " / " in k:
a : List[Any] = k.find(''' / ''')
a : Tuple = k[index + len(''' / ''') :]
a : int = v
with open(os.path.join(args.output_dir, '''job_links.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(job_links, fp, ensure_ascii=False, indent=4)
a : Tuple = get_artifacts_links(args.workflow_run_id, token=args.token)
with open(os.path.join(args.output_dir, '''artifacts.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(artifacts, fp, ensure_ascii=False, indent=4)
for idx, (name, url) in enumerate(artifacts.items()):
download_artifact(name, url, args.output_dir, args.token)
# Be gentle to GitHub
time.sleep(1)
a : Optional[int] = get_all_errors(args.output_dir, job_links=job_links)
# `e[1]` is the error
a : Union[str, Any] = Counter()
counter.update([e[1] for e in errors])
# print the top 30 most common test errors
a : Optional[int] = counter.most_common(3_0)
for item in most_common:
print(item)
with open(os.path.join(args.output_dir, '''errors.json'''), '''w''', encoding='''UTF-8''') as fp:
json.dump(errors, fp, ensure_ascii=False, indent=4)
a : int = reduce_by_error(errors)
a : str = reduce_by_model(errors)
a : int = make_github_table(reduced_by_error)
a : Optional[int] = make_github_table_per_model(reduced_by_model)
with open(os.path.join(args.output_dir, '''reduced_by_error.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa)
with open(os.path.join(args.output_dir, '''reduced_by_model.txt'''), '''w''', encoding='''UTF-8''') as fp:
fp.write(sa) | 19 | 0 |
"""simple docstring"""
a : List[Any] = [0, 2, 4, 6, 8]
a : List[Any] = [1, 3, 5, 7, 9]
def _UpperCamelCase ( _A , _A , _A , _A ) -> int:
if remaining_length == 0:
if digits[0] == 0 or digits[-1] == 0:
return 0
for i in range(length // 2 - 1 , -1 , -1 ):
remainder += digits[i] + digits[length - i - 1]
if remainder % 2 == 0:
return 0
remainder //= 1_0
return 1
if remaining_length == 1:
if remainder % 2 == 0:
return 0
_UpperCAmelCase = 0
for digit in range(1_0 ):
_UpperCAmelCase = digit
result += reversible_numbers(
0 , (remainder + 2 * digit) // 1_0 , _A , _A )
return result
_UpperCAmelCase = 0
for digita in range(1_0 ):
_UpperCAmelCase = digita
if (remainder + digita) % 2 == 0:
_UpperCAmelCase = ODD_DIGITS
else:
_UpperCAmelCase = EVEN_DIGITS
for digita in other_parity_digits:
_UpperCAmelCase = digita
result += reversible_numbers(
remaining_length - 2 , (remainder + digita + digita) // 1_0 , _A , _A , )
return result
def _UpperCamelCase ( _A = 9 ) -> int:
_UpperCAmelCase = 0
for length in range(1 , max_power + 1 ):
result += reversible_numbers(_A , 0 , [0] * length , _A )
return result
if __name__ == "__main__":
print(F"{solution() = }") | 702 |
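A hedged brute-force cross-check of the digit-DP above for a small limit; Project Euler 145 states there are exactly 120 reversible numbers below one thousand.
def is_reversible(n: int) -> bool:
    if n % 10 == 0:  # reverse(n) would have a leading zero
        return False
    # Reversible: every digit of n + reverse(n) is odd.
    return all(int(d) % 2 == 1 for d in str(n + int(str(n)[::-1])))
assert sum(is_reversible(n) for n in range(1, 10**3)) == 120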
"""simple docstring"""
import re
import warnings
from contextlib import contextmanager
from ...processing_utils import ProcessorMixin
class a_ ( _UpperCAmelCase ):
a : Any = ['image_processor', 'tokenizer']
a : Optional[int] = 'AutoImageProcessor'
a : Any = 'AutoTokenizer'
def __init__( self : List[str] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : List[str]=None , **__UpperCamelCase : List[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def __call__( self : Union[str, Any] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->List[str]:
'''simple docstring'''
if self._in_target_context_manager:
return self.current_processor(*__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""images""" , __UpperCamelCase )
_UpperCAmelCase = kwargs.pop("""text""" , __UpperCamelCase )
if len(__UpperCamelCase ) > 0:
_UpperCAmelCase = args[0]
_UpperCAmelCase = args[1:]
if images is None and text is None:
raise ValueError("""You need to specify either an `images` or `text` input to process.""" )
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , *__UpperCamelCase , **__UpperCamelCase )
if text is not None:
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , **__UpperCamelCase )
if text is None:
return inputs
elif images is None:
return encodings
else:
_UpperCAmelCase = encodings["""input_ids"""]
return inputs
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : int , **__UpperCamelCase : Tuple ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Tuple , *__UpperCamelCase : int , **__UpperCamelCase : Union[str, Any] ) ->int:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@contextmanager
def _snake_case ( self : Tuple ) ->Union[str, Any]:
'''simple docstring'''
warnings.warn(
"""`as_target_processor` is deprecated and will be removed in v5 of Transformers. You can process your """
"""labels by using the argument `text` of the regular `__call__` method (either in the same call as """
"""your images inputs, or in a separate call.""" )
_UpperCAmelCase = True
_UpperCAmelCase = self.tokenizer
yield
_UpperCAmelCase = self.image_processor
_UpperCAmelCase = False
def _snake_case ( self : str , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]=False , __UpperCamelCase : Union[str, Any]=None ) ->List[str]:
'''simple docstring'''
if added_vocab is None:
_UpperCAmelCase = self.tokenizer.get_added_vocab()
_UpperCAmelCase = {}
while tokens:
_UpperCAmelCase = re.search(r"""<s_(.*?)>""" , __UpperCamelCase , re.IGNORECASE )
if start_token is None:
break
_UpperCAmelCase = start_token.group(1 )
_UpperCAmelCase = re.search(rf"""</s_{key}>""" , __UpperCamelCase , re.IGNORECASE )
_UpperCAmelCase = start_token.group()
if end_token is None:
_UpperCAmelCase = tokens.replace(__UpperCamelCase , """""" )
else:
_UpperCAmelCase = end_token.group()
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.escape(__UpperCamelCase )
_UpperCAmelCase = re.search(f"""{start_token_escaped}(.*?){end_token_escaped}""" , __UpperCamelCase , re.IGNORECASE )
if content is not None:
_UpperCAmelCase = content.group(1 ).strip()
if r"<s_" in content and r"</s_" in content: # non-leaf node
_UpperCAmelCase = self.tokenajson(__UpperCamelCase , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if value:
if len(__UpperCamelCase ) == 1:
_UpperCAmelCase = value[0]
_UpperCAmelCase = value
else: # leaf nodes
_UpperCAmelCase = []
for leaf in content.split(r"""<sep/>""" ):
_UpperCAmelCase = leaf.strip()
if leaf in added_vocab and leaf[0] == "<" and leaf[-2:] == "/>":
_UpperCAmelCase = leaf[1:-2] # for categorical special tokens
output[key].append(__UpperCamelCase )
if len(output[key] ) == 1:
_UpperCAmelCase = output[key][0]
_UpperCAmelCase = tokens[tokens.find(__UpperCamelCase ) + len(__UpperCamelCase ) :].strip()
if tokens[:6] == r"<sep/>": # non-leaf nodes
return [output] + self.tokenajson(tokens[6:] , is_inner_value=__UpperCamelCase , added_vocab=__UpperCamelCase )
if len(__UpperCamelCase ):
return [output] if is_inner_value else output
else:
return [] if is_inner_value else {"text_sequence": tokens}
@property
def _snake_case ( self : Tuple ) ->Optional[int]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
return self.image_processor | 19 | 0 |
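A hedged illustration of the token-to-JSON conversion implemented above: <s_key>...</s_key> tags become nested dict keys; the token names here are made up for the example.
import re
tokens = "<s_menu><s_name>Latte</s_name><s_price>4.50</s_price></s_menu>"
start = re.search(r"<s_(.*?)>", tokens, re.IGNORECASE)
assert start.group(1) == "menu"  # the first key the parser peels off
# Fully parsed by the recursion above, this would become, roughly:
# {"menu": {"name": "Latte", "price": "4.50"}}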
"""simple docstring"""
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import LevitImageProcessor
class a_ ( unittest.TestCase ):
def __init__( self : int , __UpperCamelCase : Optional[int] , __UpperCamelCase : Union[str, Any]=7 , __UpperCamelCase : Dict=3 , __UpperCamelCase : int=18 , __UpperCamelCase : int=30 , __UpperCamelCase : Dict=4_00 , __UpperCamelCase : int=True , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[Any]=True , __UpperCamelCase : Any=None , __UpperCamelCase : Optional[int]=True , __UpperCamelCase : str=[0.5, 0.5, 0.5] , __UpperCamelCase : Tuple=[0.5, 0.5, 0.5] , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = size if size is not None else {"""shortest_edge""": 18}
_UpperCAmelCase = crop_size if crop_size is not None else {"""height""": 18, """width""": 18}
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = image_size
_UpperCAmelCase = min_resolution
_UpperCAmelCase = max_resolution
_UpperCAmelCase = do_resize
_UpperCAmelCase = size
_UpperCAmelCase = do_center_crop
_UpperCAmelCase = crop_size
_UpperCAmelCase = do_normalize
_UpperCAmelCase = image_mean
_UpperCAmelCase = image_std
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"do_center_crop": self.do_center_crop,
"size": self.size,
"crop_size": self.crop_size,
}
@require_torch
@require_vision
class a_ ( _UpperCAmelCase , unittest.TestCase ):
a : Optional[Any] = LevitImageProcessor if is_vision_available() else None
def _snake_case ( self : Union[str, Any] ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase = LevitImageProcessingTester(self )
@property
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
return self.image_processor_tester.prepare_image_processor_dict()
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(__UpperCamelCase , """image_mean""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """image_std""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_normalize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_resize""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """do_center_crop""" ) )
self.assertTrue(hasattr(__UpperCamelCase , """size""" ) )
def _snake_case ( self : Tuple ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18} )
self.assertEqual(image_processor.crop_size , {"""height""": 18, """width""": 18} )
_UpperCAmelCase = self.image_processing_class.from_dict(self.image_processor_dict , size=42 , crop_size=84 )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42} )
self.assertEqual(image_processor.crop_size , {"""height""": 84, """width""": 84} )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
pass
def _snake_case ( self : str ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , Image.Image )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , numpify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , np.ndarray )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_UpperCAmelCase = prepare_image_inputs(self.image_processor_tester , equal_resolution=__UpperCamelCase , torchify=__UpperCamelCase )
for image in image_inputs:
self.assertIsInstance(__UpperCamelCase , torch.Tensor )
# Test not batched input
_UpperCAmelCase = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
1,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , )
# Test batched
_UpperCAmelCase = image_processing(__UpperCamelCase , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
self.image_processor_tester.crop_size["""height"""],
self.image_processor_tester.crop_size["""width"""],
) , ) | 703 |
"""simple docstring"""
import colorsys
from PIL import Image # type: ignore
def _UpperCamelCase ( _A , _A , _A ) -> float:
"""simple docstring"""
_UpperCAmelCase = x
_UpperCAmelCase = y
for step in range(_A ): # noqa: B007
_UpperCAmelCase = a * a - b * b + x
_UpperCAmelCase = 2 * a * b + y
_UpperCAmelCase = a_new
        # divergence happens for every complex number whose absolute value
        # exceeds 2, i.e. once a * a + b * b is greater than 4
if a * a + b * b > 4:
break
return step / (max_step - 1)
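# A quick sanity check, assuming the function and parameters carry their intended
# names get_distance(x, y, max_step):
#     get_distance(0, 0, 50)  -> 1.0  (the origin never diverges)
#     get_distance(3, 0, 50)  -> 0.0  (the first iterate already leaves the radius-2 disk)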
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return (2_5_5, 2_5_5, 2_5_5)
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
if distance == 1:
return (0, 0, 0)
else:
return tuple(round(i * 2_5_5 ) for i in colorsys.hsv_to_rgb(_A , 1 , 1 ) )
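# Both coloring functions keep points inside the set (distance == 1) black; the
# color-coded variant maps the escape distance onto the HSV hue wheel before
# converting to 0-255 RGB values.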
def _UpperCamelCase ( _A = 8_0_0 , _A = 6_0_0 , _A = -0.6 , _A = 0 , _A = 3.2 , _A = 5_0 , _A = True , ) -> Image.Image:
"""simple docstring"""
_UpperCAmelCase = Image.new("""RGB""" , (image_width, image_height) )
_UpperCAmelCase = img.load()
# loop through the image-coordinates
for image_x in range(_A ):
for image_y in range(_A ):
# determine the figure-coordinates based on the image-coordinates
_UpperCAmelCase = figure_width / image_width * image_height
_UpperCAmelCase = figure_center_x + (image_x / image_width - 0.5) * figure_width
_UpperCAmelCase = figure_center_y + (image_y / image_height - 0.5) * figure_height
_UpperCAmelCase = get_distance(_A , _A , _A )
# color the corresponding pixel based on the selected coloring-function
if use_distance_color_coding:
_UpperCAmelCase = get_color_coded_rgb(_A )
else:
_UpperCAmelCase = get_black_and_white_rgb(_A )
return img
if __name__ == "__main__":
import doctest
doctest.testmod()
# colored version, full figure
a : List[str] = get_image()
# uncomment for colored version, different section, zoomed in
# img = get_image(figure_center_x = -0.6, figure_center_y = -0.4,
# figure_width = 0.8)
# uncomment for black and white version, full figure
# img = get_image(use_distance_color_coding = False)
# uncomment to save the image
# img.save("mandelbrot.png")
    img.show()
"""simple docstring"""
import math
import sys
import cva
import numpy as np
def _UpperCamelCase ( _A , _A ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = math.sqrt(_A )
_UpperCAmelCase = 1 / (sigma * math.sqrt(2 * math.pi ))
return cons * np.exp(-((img / sigma) ** 2) * 0.5 )
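# vec_gaussian applies a one-dimensional Gaussian density elementwise, so the same
# helper can weight spatial offsets and intensity differences alike; note that the
# variance argument is turned into a standard deviation via the square root above.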
def _UpperCamelCase ( _A , _A , _A , _A ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = kernel_size // 2
return img[x - half : x + half + 1, y - half : y + half + 1]
def _UpperCamelCase ( _A , _A ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = np.zeros((kernel_size, kernel_size) )
for i in range(0 , _A ):
for j in range(0 , _A ):
_UpperCAmelCase = math.sqrt(
abs(i - kernel_size // 2 ) ** 2 + abs(j - kernel_size // 2 ) ** 2 )
return vec_gaussian(_A , _A )
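# The spatial kernel depends only on each cell's Euclidean distance from the kernel
# center, so it can be precomputed once (see below) and reused for every pixel.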
def _UpperCamelCase ( _A , _A , _A , _A , ) -> np.ndarray:
"""simple docstring"""
_UpperCAmelCase = np.zeros(img.shape )
_UpperCAmelCase = get_gauss_kernel(_A , _A )
_UpperCAmelCase ,_UpperCAmelCase = img.shape
for i in range(kernel_size // 2 , size_x - kernel_size // 2 ):
for j in range(kernel_size // 2 , size_y - kernel_size // 2 ):
_UpperCAmelCase = get_slice(_A , _A , _A , _A )
_UpperCAmelCase = img_s - img_s[kernel_size // 2, kernel_size // 2]
_UpperCAmelCase = vec_gaussian(_A , _A )
_UpperCAmelCase = np.multiply(_A , _A )
_UpperCAmelCase = np.multiply(_A , _A )
_UpperCAmelCase = np.sum(_A ) / np.sum(_A )
_UpperCAmelCase = val
return imga
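# A minimal smoke test (a sketch; the sizes and variances are illustrative only):
#     gray = np.random.rand(32, 32).astype("float32")
#     smoothed = bilateral_filter(gray, 1.0, 1.0, 5)
# Pixels within kernel_size // 2 of the border stay zero because the loops skip them.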
def _UpperCamelCase ( _A ) -> tuple:
"""simple docstring"""
_UpperCAmelCase = args[1] if args[1:] else """../image_data/lena.jpg"""
_UpperCAmelCase = float(args[2] ) if args[2:] else 1.0
_UpperCAmelCase = float(args[3] ) if args[3:] else 1.0
if args[4:]:
_UpperCAmelCase = int(args[4] )
_UpperCAmelCase = kernel_size + abs(kernel_size % 2 - 1 )
else:
_UpperCAmelCase = 5
return filename, spatial_variance, intensity_variance, kernel_size
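# Expected invocation (the script name and path are illustrative):
#     python bilateral_filter.py ../image_data/lena.jpg 1.0 1.0 5
# An even kernel size is bumped to the next odd value so the kernel has a center pixel.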
if __name__ == "__main__":
a : int = parse_args(sys.argv)
a : Union[str, Any] = cva.imread(filename, 0)
cva.imshow('''input image''', img)
a : str = img / 2_5_5
a : Union[str, Any] = out.astype('''float32''')
a : str = bilateral_filter(out, spatial_variance, intensity_variance, kernel_size)
a : List[str] = out * 2_5_5
a : Any = np.uinta(out)
cva.imshow('''output image''', out)
cva.waitKey(0)
    cva.destroyAllWindows()
"""simple docstring"""
from typing import Optional
from torch import nn
from .transformer_ad import TransformeraDModel, TransformeraDModelOutput
class a_ ( nn.Module ):
def __init__( self : List[str] , __UpperCamelCase : int = 16 , __UpperCamelCase : int = 88 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : int = 1 , __UpperCamelCase : float = 0.0 , __UpperCamelCase : int = 32 , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : bool = False , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : Optional[int] = None , __UpperCamelCase : str = "geglu" , __UpperCamelCase : Optional[int] = None , ) ->Dict:
'''simple docstring'''
super().__init__()
_UpperCAmelCase = nn.ModuleList(
[
TransformeraDModel(
num_attention_heads=__UpperCamelCase , attention_head_dim=__UpperCamelCase , in_channels=__UpperCamelCase , num_layers=__UpperCamelCase , dropout=__UpperCamelCase , norm_num_groups=__UpperCamelCase , cross_attention_dim=__UpperCamelCase , attention_bias=__UpperCamelCase , sample_size=__UpperCamelCase , num_vector_embeds=__UpperCamelCase , activation_fn=__UpperCamelCase , num_embeds_ada_norm=__UpperCamelCase , )
for _ in range(2 )
] )
# Variables that can be set by a pipeline:
# The ratio of transformer1 to transformer2's output states to be combined during inference
_UpperCAmelCase = 0.5
# The shape of `encoder_hidden_states` is expected to be
# `(batch_size, condition_lengths[0]+condition_lengths[1], num_features)`
_UpperCAmelCase = [77, 2_57]
# Which transformer to use to encode which condition.
# E.g. `(1, 0)` means that we'll use `transformers[1](conditions[0])` and `transformers[0](conditions[1])`
_UpperCAmelCase = [1, 0]
def _snake_case ( self : Optional[int] , __UpperCamelCase : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : bool = True , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = hidden_states
_UpperCAmelCase = []
_UpperCAmelCase = 0
# attention_mask is not used yet
for i in range(2 ):
# for each of the two transformers, pass the corresponding condition tokens
_UpperCAmelCase = encoder_hidden_states[:, tokens_start : tokens_start + self.condition_lengths[i]]
_UpperCAmelCase = self.transformer_index_for_condition[i]
_UpperCAmelCase = self.transformers[transformer_index](
__UpperCamelCase , encoder_hidden_states=__UpperCamelCase , timestep=__UpperCamelCase , cross_attention_kwargs=__UpperCamelCase , return_dict=__UpperCamelCase , )[0]
encoded_states.append(encoded_state - input_states )
tokens_start += self.condition_lengths[i]
_UpperCAmelCase = encoded_states[0] * self.mix_ratio + encoded_states[1] * (1 - self.mix_ratio)
_UpperCAmelCase = output_states + input_states
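        # Net effect: each sub-transformer contributed a residual (encoded_state - input_states);
        # the two residuals are blended with mix_ratio and added back onto the original hidden
        # states, so mix_ratio == 1.0 keeps only the residual computed for the first condition.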
if not return_dict:
return (output_states,)
        return TransformeraDModelOutput(sample=__UpperCamelCase )
"""simple docstring"""
import warnings
from typing import List
import numpy as np
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding
from ...utils import is_flax_available, is_tf_available, is_torch_available
class a_ ( _UpperCAmelCase ):
a : List[Any] = ['image_processor', 'tokenizer']
a : Any = 'OwlViTImageProcessor'
a : int = ('CLIPTokenizer', 'CLIPTokenizerFast')
def __init__( self : Optional[int] , __UpperCamelCase : Any=None , __UpperCamelCase : Optional[int]=None , **__UpperCamelCase : List[str] ) ->str:
'''simple docstring'''
_UpperCAmelCase = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" , __UpperCamelCase , )
_UpperCAmelCase = kwargs.pop("""feature_extractor""" )
_UpperCAmelCase = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(__UpperCamelCase , __UpperCamelCase )
def __call__( self : str , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : List[str]=None , __UpperCamelCase : Optional[Any]=None , __UpperCamelCase : Any="max_length" , __UpperCamelCase : List[str]="np" , **__UpperCamelCase : Dict ) ->str:
'''simple docstring'''
if text is None and query_images is None and images is None:
raise ValueError(
"""You have to specify at least one text or query image or image. All three cannot be none.""" )
if text is not None:
if isinstance(__UpperCamelCase , __UpperCamelCase ) or (isinstance(__UpperCamelCase , __UpperCamelCase ) and not isinstance(text[0] , __UpperCamelCase )):
_UpperCAmelCase = [self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )]
elif isinstance(__UpperCamelCase , __UpperCamelCase ) and isinstance(text[0] , __UpperCamelCase ):
_UpperCAmelCase = []
# Maximum number of queries across batch
_UpperCAmelCase = max([len(__UpperCamelCase ) for t in text] )
# Pad all batch samples to max number of text queries
for t in text:
if len(__UpperCamelCase ) != max_num_queries:
_UpperCAmelCase = t + [""" """] * (max_num_queries - len(__UpperCamelCase ))
_UpperCAmelCase = self.tokenizer(__UpperCamelCase , padding=__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
encodings.append(__UpperCamelCase )
else:
raise TypeError("""Input text should be a string, a list of strings or a nested list of strings""" )
if return_tensors == "np":
_UpperCAmelCase = np.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_UpperCAmelCase = np.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "jax" and is_flax_available():
import jax.numpy as jnp
_UpperCAmelCase = jnp.concatenate([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_UpperCAmelCase = jnp.concatenate([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
elif return_tensors == "pt" and is_torch_available():
import torch
_UpperCAmelCase = torch.cat([encoding["""input_ids"""] for encoding in encodings] , dim=0 )
_UpperCAmelCase = torch.cat([encoding["""attention_mask"""] for encoding in encodings] , dim=0 )
elif return_tensors == "tf" and is_tf_available():
import tensorflow as tf
_UpperCAmelCase = tf.stack([encoding["""input_ids"""] for encoding in encodings] , axis=0 )
_UpperCAmelCase = tf.stack([encoding["""attention_mask"""] for encoding in encodings] , axis=0 )
else:
raise ValueError("""Target return tensor type could not be returned""" )
_UpperCAmelCase = BatchEncoding()
_UpperCAmelCase = input_ids
_UpperCAmelCase = attention_mask
if query_images is not None:
_UpperCAmelCase = BatchEncoding()
_UpperCAmelCase = self.image_processor(
__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase ).pixel_values
_UpperCAmelCase = query_pixel_values
if images is not None:
_UpperCAmelCase = self.image_processor(__UpperCamelCase , return_tensors=__UpperCamelCase , **__UpperCamelCase )
if text is not None and images is not None:
_UpperCAmelCase = image_features.pixel_values
return encoding
elif query_images is not None and images is not None:
_UpperCAmelCase = image_features.pixel_values
return encoding
elif text is not None or query_images is not None:
return encoding
else:
return BatchEncoding(data=dict(**__UpperCamelCase ) , tensor_type=__UpperCamelCase )
def _snake_case ( self : Optional[int] , *__UpperCamelCase : Dict , **__UpperCamelCase : Optional[int] ) ->List[str]:
'''simple docstring'''
return self.image_processor.post_process(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Optional[Any] , *__UpperCamelCase : Union[str, Any] , **__UpperCamelCase : Any ) ->Optional[int]:
'''simple docstring'''
return self.image_processor.post_process_object_detection(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Union[str, Any] , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : List[str] ) ->Dict:
'''simple docstring'''
return self.image_processor.post_process_image_guided_detection(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Optional[int] , *__UpperCamelCase : List[Any] , **__UpperCamelCase : Union[str, Any] ) ->Tuple:
'''simple docstring'''
return self.tokenizer.batch_decode(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : Any , *__UpperCamelCase : Tuple , **__UpperCamelCase : Dict ) ->Dict:
'''simple docstring'''
return self.tokenizer.decode(*__UpperCamelCase , **__UpperCamelCase )
@property
def _snake_case ( self : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" , __UpperCamelCase , )
return self.image_processor_class
@property
def _snake_case ( self : Dict ) ->Any:
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" , __UpperCamelCase , )
        return self.image_processor
"""simple docstring"""
import argparse
import torch
from transformers import LxmertConfig, LxmertForPreTraining, load_tf_weights_in_lxmert
from transformers.utils import logging
logging.set_verbosity_info()
def _UpperCamelCase ( _A , _A , _A ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = LxmertConfig.from_json_file(_A )
print(F"""Building PyTorch model from configuration: {config}""" )
_UpperCAmelCase = LxmertForPreTraining(_A )
# Load weights from tf checkpoint
load_tf_weights_in_lxmert(_A , _A , _A )
# Save pytorch-model
print(F"""Save PyTorch model to {pytorch_dump_path}""" )
torch.save(model.state_dict() , _A )
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--tf_checkpoint_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help='''The config json file corresponding to the pre-trained model. \nThis specifies the model architecture.''',
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
a : Union[str, Any] = parser.parse_args()
    convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.config_file, args.pytorch_dump_path)
import collections
import json
import os
import re
from typing import TYPE_CHECKING, List, Optional, Tuple
import numpy as np
from ...tokenization_utils_fast import PreTrainedTokenizer
from ...utils import logging
if TYPE_CHECKING:
from transformers.pipelines.conversational import Conversation
a : Optional[int] = logging.get_logger(__name__)
a : Any = {'''vocab_file''': '''vocab.txt''', '''emoji_file''': '''emoji.json'''}
a : Union[str, Any] = {
'''vocab_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/vocab.txt''',
},
'''emoji_file''': {
'''abeja/gpt-neox-japanese-2.7b''': '''https://huggingface.co/abeja/gpt-neox-japanese-2.7b/resolve/main/emoji.json''',
},
}
a : Optional[Any] = {
'''abeja/gpt-neox-japanese-2.7b''': 2_0_4_8,
}
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
with open(_A , """r""" , encoding="""utf-8""" ) as f:
_UpperCAmelCase = json.loads(f.read() )
_UpperCAmelCase = collections.OrderedDict()
_UpperCAmelCase = collections.OrderedDict()
_UpperCAmelCase = collections.OrderedDict()
with open(_A , """r""" , encoding="""utf-8""" ) as f:
_UpperCAmelCase = f.readlines()
_UpperCAmelCase = [[t.rstrip("""\n""" )] if (t == """,""" or """,""" not in t) else t.rstrip("""\n""" ).split(""",""" ) for t in token]
for idx, b in enumerate(_A ):
_UpperCAmelCase = b
_UpperCAmelCase = idx
for wd in b:
_UpperCAmelCase = idx
return vocab, raw_vocab, ids_to_tokens, emoji
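# Each line of vocab.txt holds one or more comma-separated surface forms that share a
# single token id, hence the three parallel mappings returned above alongside the
# emoji table loaded from emoji.json.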
class a_ ( _UpperCAmelCase ):
a : int = VOCAB_FILES_NAMES
a : Any = PRETRAINED_VOCAB_FILES_MAP
a : List[str] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a : Optional[Any] = ['input_ids', 'attention_mask']
def __init__( self : Any , __UpperCamelCase : Any , __UpperCamelCase : Dict , __UpperCamelCase : Optional[Any]="<|endoftext|>" , __UpperCamelCase : str="<|endoftext|>" , __UpperCamelCase : str="<|startoftext|>" , __UpperCamelCase : Any="<|endoftext|>" , __UpperCamelCase : Optional[int]=False , **__UpperCamelCase : Optional[Any] , ) ->List[Any]:
'''simple docstring'''
super().__init__(
unk_token=__UpperCamelCase , pad_token=__UpperCamelCase , bos_token=__UpperCamelCase , eos_token=__UpperCamelCase , do_clean_text=__UpperCamelCase , **__UpperCamelCase , )
if not os.path.isfile(__UpperCamelCase ):
raise ValueError(
f"""Can't find a vocabulary file at path '{vocab_file}'. To load the vocabulary from a Google pretrained"""
""" model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
if not os.path.isfile(__UpperCamelCase ):
raise ValueError(
f"""Can't find a emoji file at path '{emoji_file}'. To load the emoji information from a Google"""
""" pretrained model use `tokenizer = GPTNeoXJapaneseokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`""" )
_UpperCAmelCase = do_clean_text
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = load_vocab_and_emoji(__UpperCamelCase , __UpperCamelCase )
_UpperCAmelCase = SubWordJapaneseTokenizer(
vocab=self.vocab , ids_to_tokens=self.ids_to_tokens , emoji=self.emoji )
@property
def _snake_case ( self : Union[str, Any] ) ->Tuple:
'''simple docstring'''
return len(self.raw_vocab )
def _snake_case ( self : Any ) ->List[str]:
'''simple docstring'''
return dict(self.raw_vocab , **self.added_tokens_encoder )
def _snake_case ( self : Optional[int] , __UpperCamelCase : Tuple ) ->List[str]:
'''simple docstring'''
return self.subword_tokenizer.tokenize(__UpperCamelCase , clean=self.do_clean_text )
def _snake_case ( self : Any , __UpperCamelCase : List[str] ) ->Any:
'''simple docstring'''
return self.vocab.get(__UpperCamelCase , self.vocab.get(self.unk_token ) )
def _snake_case ( self : int , __UpperCamelCase : Union[str, Any] ) ->str:
'''simple docstring'''
return self.subword_tokenizer.convert_id_to_token(__UpperCamelCase )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : Optional[int] ) ->int:
'''simple docstring'''
_UpperCAmelCase = """""".join(__UpperCamelCase ).strip()
return out_string
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : "Conversation" ) ->List[int]:
'''simple docstring'''
_UpperCAmelCase = []
for is_user, text in conversation.iter_texts():
input_ids.extend(self.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase ) + [self.eos_token_id] )
if len(__UpperCamelCase ) > self.model_max_length:
_UpperCAmelCase = input_ids[-self.model_max_length :]
return input_ids
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Optional[str] = None ) ->Tuple[str]:
'''simple docstring'''
_UpperCAmelCase = 0
if os.path.isdir(__UpperCamelCase ):
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
_UpperCAmelCase = os.path.join(
__UpperCamelCase , (filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""emoji_file"""] )
else:
_UpperCAmelCase = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""vocab_file"""]
)
_UpperCAmelCase = (
(filename_prefix + """-""" if filename_prefix else """""") + save_directory + VOCAB_FILES_NAMES["""emoji_file"""]
)
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
for token_index, token in self.ids_to_tokens.items():
if index != token_index:
logger.warning(
f"""Saving vocabulary to {vocab_file}: vocabulary indices are not consecutive."""
""" Please check that the vocabulary is not corrupted!""" )
_UpperCAmelCase = token_index
writer.write(""",""".join(__UpperCamelCase ) + """\n""" )
index += 1
with open(__UpperCamelCase , """w""" , encoding="""utf-8""" ) as writer:
json.dump(self.emoji , __UpperCamelCase )
return vocab_file, emoji_file
class a_ ( _UpperCAmelCase ):
def __init__( self : Optional[int] , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : List[Any] , __UpperCamelCase : Dict ) ->Union[str, Any]:
'''simple docstring'''
_UpperCAmelCase = vocab # same as swe
_UpperCAmelCase = ids_to_tokens # same as bpe
_UpperCAmelCase = emoji
_UpperCAmelCase = np.max([len(__UpperCamelCase ) for w in self.vocab.keys()] )
_UpperCAmelCase = re.compile(r"""(https?|ftp)(:\/\/[-_\.!~*\'()a-zA-Z0-9;\/?:\@&=\+$,%#]+)""" )
_UpperCAmelCase = re.compile(r"""[A-Za-z0-9\._+]*@[\-_0-9A-Za-z]+(\.[A-Za-z]+)*""" )
_UpperCAmelCase = re.compile(r"""[\(]{0,1}[0-9]{2,4}[\)\-\(]{0,1}[0-9]{2,4}[\)\-]{0,1}[0-9]{3,4}""" )
_UpperCAmelCase = re.compile(
r"""([12]\d{3}[/\-年])*(0?[1-9]|1[0-2])[/\-月]((0?[1-9]|[12][0-9]|3[01])日?)*(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
_UpperCAmelCase = re.compile(
r"""(明治|大正|昭和|平成|令和|㍾|㍽|㍼|㍻|\u32ff)\d{1,2}年(0?[1-9]|1[0-2])月(0?[1-9]|[12][0-9]|3[01])日(\d{1,2}|:|\d{1,2}時|\d{1,2}分|\(日\)|\(月\)|\(火\)|\(水\)|\(木\)|\(金\)|\(土\)|㈰|㈪|㈫|㈬|㈭|㈮|㈯)*""" )
_UpperCAmelCase = re.compile(
r"""((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*億)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*万)*((0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*千)*(0|[1-9]\d*|[1-9]\d{0,2}(,\d{3})+)*(千円|万円|千万円|円|千ドル|万ドル|千万ドル|ドル|千ユーロ|万ユーロ|千万ユーロ|ユーロ)+(\(税込\)|\(税抜\)|\+tax)*""" )
_UpperCAmelCase = """─━│┃┄┅┆┇┈┉┊┋┌┍┎┏┐┑┒┓└┕┖┗┘┙┚┛├┝┞┟┠┡┢┣┤┥┦┧┨┩┪┫┬┭┮┯┰┱┲┳┴┵┶┷┸┹┺┻┼┽┾┿╀╁╂╃╄╅╆╇╈╉╊╋╌╍╎╏═║╒╓╔╕╖╗╘╙╚╛╜╝╞╟╠╡╢╣╤╥╦╧╨╩╪╫╬╭╮╯╰╱╲╳╴╵╶╷╸╹╺╻╼╽╾╿"""
_UpperCAmelCase = """▀▁▂▃▄▅▆▇█▉▊▋▌▍▎▏▐░▒▓▔▕▖▗▘▙▚▛▜▝▞▟"""
_UpperCAmelCase = str.maketrans({k: """<BLOCK>""" for k in keisen + blocks} )
def __len__( self : str ) ->List[Any]:
'''simple docstring'''
return len(self.ids_to_tokens )
def _snake_case ( self : Optional[Any] , __UpperCamelCase : str ) ->str:
'''simple docstring'''
_UpperCAmelCase = self.content_repattera.sub("""<URL>""" , __UpperCamelCase )
_UpperCAmelCase = self.content_repattera.sub("""<EMAIL>""" , __UpperCamelCase )
_UpperCAmelCase = self.content_repattera.sub("""<TEL>""" , __UpperCamelCase )
_UpperCAmelCase = self.content_repattera.sub("""<DATE>""" , __UpperCamelCase )
_UpperCAmelCase = self.content_repattera.sub("""<DATE>""" , __UpperCamelCase )
_UpperCAmelCase = self.content_repattera.sub("""<PRICE>""" , __UpperCamelCase )
_UpperCAmelCase = content.translate(self.content_transa )
while "<BLOCK><BLOCK>" in content:
_UpperCAmelCase = content.replace("""<BLOCK><BLOCK>""" , """<BLOCK>""" )
return content
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any]=False ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = text.replace(""" """ , """<SP>""" )
_UpperCAmelCase = text.replace(""" """ , """<SP>""" )
_UpperCAmelCase = text.replace("""\r\n""" , """<BR>""" )
_UpperCAmelCase = text.replace("""\n""" , """<BR>""" )
_UpperCAmelCase = text.replace("""\r""" , """<BR>""" )
_UpperCAmelCase = text.replace("""\t""" , """<TAB>""" )
_UpperCAmelCase = text.replace("""—""" , """ー""" )
_UpperCAmelCase = text.replace("""−""" , """ー""" )
for k, v in self.emoji["emoji"].items():
if k in text:
_UpperCAmelCase = text.replace(__UpperCamelCase , __UpperCamelCase )
if clean:
_UpperCAmelCase = self.clean_text(__UpperCamelCase )
def check_simbol(__UpperCamelCase : Any ):
_UpperCAmelCase = x.encode()
if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 2:
_UpperCAmelCase = (int(e[0] ) << 8) + int(e[1] )
if (
(c >= 0xc2a1 and c <= 0xc2bf)
or (c >= 0xc780 and c <= 0xc783)
or (c >= 0xcab9 and c <= 0xcbbf)
or (c >= 0xcc80 and c <= 0xcda2)
):
return True
return False
def checkuae(__UpperCamelCase : List[Any] ):
_UpperCAmelCase = x.encode()
if len(__UpperCamelCase ) == 1 and len(__UpperCamelCase ) == 3:
_UpperCAmelCase = (int(e[0] ) << 16) + (int(e[1] ) << 8) + int(e[2] )
if c >= 0xe28080 and c <= 0xe2b07f:
return True
return False
_UpperCAmelCase = 0
_UpperCAmelCase = []
while pos < len(__UpperCamelCase ):
_UpperCAmelCase = min(len(__UpperCamelCase ) , pos + self.maxlen + 1 ) if text[pos] == """<""" else pos + 3
_UpperCAmelCase = [] # (token_id, token, pos)
for e in range(__UpperCamelCase , __UpperCamelCase , -1 ):
_UpperCAmelCase = text[pos:e]
if wd in self.vocab:
if wd[0] == "<" and len(__UpperCamelCase ) > 2:
_UpperCAmelCase = [(self.vocab[wd], wd, e)]
break
else:
candidates.append((self.vocab[wd], wd, e) )
if len(__UpperCamelCase ) > 0:
# the smallest token_id is adopted
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = sorted(__UpperCamelCase , key=lambda __UpperCamelCase : x[0] )[0]
result.append(__UpperCamelCase )
_UpperCAmelCase = e
else:
_UpperCAmelCase = pos + 1
_UpperCAmelCase = text[pos:end]
if check_simbol(__UpperCamelCase ):
result.append("""<KIGOU>""" )
elif checkuae(__UpperCamelCase ):
result.append("""<U2000U2BFF>""" )
else:
for i in wd.encode("""utf-8""" ):
result.append("""<|byte%d|>""" % i )
_UpperCAmelCase = end
return result
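        # At every position all vocabulary entries that match are collected and the one
        # with the smallest token id is adopted (special <...> tokens win immediately);
        # characters matching nothing fall back to <KIGOU>, <U2000U2BFF> or per-byte
        # <|byte%d|> tokens.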
def _snake_case ( self : int , __UpperCamelCase : Tuple , __UpperCamelCase : Optional[int]="\n" ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
_UpperCAmelCase = []
_UpperCAmelCase = self.ids_to_tokens[index][0]
if word[:6] == "<|byte" and word[-2:] == "|>":
byte_tokens.append(int(word[6:-2] ) )
else:
if len(__UpperCamelCase ) > 0:
words.append(bytearray(__UpperCamelCase ).decode("""utf-8""" , errors="""replace""" ) )
_UpperCAmelCase = []
if word[:7] == "<|emoji" and word[-2:] == "|>":
words.append(self.emoji["""emoji_inv"""][word] )
elif word == "<SP>":
words.append(""" """ )
elif word == "<BR>":
words.append(__UpperCamelCase )
elif word == "<TAB>":
words.append("""\t""" )
elif word == "<BLOCK>":
words.append("""▀""" )
elif word == "<KIGOU>":
words.append("""ǀ""" )
elif word == "<U2000U2BFF>":
words.append("""‖""" )
else:
words.append(__UpperCamelCase )
if len(__UpperCamelCase ) > 0:
words.append(bytearray(__UpperCamelCase ).decode("""utf-8""" , errors="""replace""" ) )
_UpperCAmelCase = """""".join(__UpperCamelCase )
        return text
"""simple docstring"""
import argparse
import os
import re
import packaging.version
a : str = '''examples/'''
a : List[str] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
a : Tuple = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
a : List[str] = '''README.md'''
def _UpperCamelCase ( _A , _A , _A ) -> Dict:
"""simple docstring"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase ,_UpperCAmelCase = REPLACE_PATTERNS[pattern]
_UpperCAmelCase = replace.replace("""VERSION""" , _A )
_UpperCAmelCase = re_pattern.sub(_A , _A )
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.write(_A )
def _UpperCamelCase ( _A ) -> Union[str, Any]:
"""simple docstring"""
for folder, directories, fnames in os.walk(_A ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove("""research_projects""" )
if "legacy" in directories:
directories.remove("""legacy""" )
for fname in fnames:
if fname.endswith(""".py""" ):
update_version_in_file(os.path.join(_A , _A ) , _A , pattern="""examples""" )
def _UpperCamelCase ( _A , _A=False ) -> int:
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(_A , _A , _A )
if not patch:
update_version_in_examples(_A )
def _UpperCamelCase ( ) -> Any:
"""simple docstring"""
_UpperCAmelCase = """🤗 Transformers currently provides the following architectures"""
_UpperCAmelCase = """1. Want to contribute a new model?"""
with open(_A , """r""" , encoding="""utf-8""" , newline="""\n""" ) as f:
_UpperCAmelCase = f.readlines()
# Find the start of the list.
_UpperCAmelCase = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
_UpperCAmelCase = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith("""1.""" ):
_UpperCAmelCase = lines[index].replace(
"""https://huggingface.co/docs/diffusers/main/model_doc""" , """https://huggingface.co/docs/diffusers/model_doc""" , )
index += 1
with open(_A , """w""" , encoding="""utf-8""" , newline="""\n""" ) as f:
f.writelines(_A )
def _UpperCamelCase ( ) -> Optional[int]:
"""simple docstring"""
with open(REPLACE_FILES["""init"""] , """r""" ) as f:
_UpperCAmelCase = f.read()
_UpperCAmelCase = REPLACE_PATTERNS["""init"""][0].search(_A ).groups()[0]
return packaging.version.parse(_A )
def _UpperCamelCase ( _A=False ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
if patch and default_version.is_devrelease:
raise ValueError("""Can't create a patch version from the dev branch, checkout a released version!""" )
if default_version.is_devrelease:
_UpperCAmelCase = default_version.base_version
elif patch:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor}.{default_version.micro + 1}"""
else:
_UpperCAmelCase = F"""{default_version.major}.{default_version.minor + 1}.0"""
# Now let's ask nicely if that's the right one.
_UpperCAmelCase = input(F"""Which version are you releasing? [{default_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = default_version
print(F"""Updating version to {version}.""" )
global_version_update(_A , patch=_A )
def _UpperCamelCase ( ) -> List[Any]:
"""simple docstring"""
_UpperCAmelCase = get_version()
_UpperCAmelCase = F"""{current_version.major}.{current_version.minor + 1}.0.dev0"""
_UpperCAmelCase = current_version.base_version
# Check with the user we got that right.
_UpperCAmelCase = input(F"""Which version are we developing now? [{dev_version}]""" )
if len(_A ) == 0:
_UpperCAmelCase = dev_version
print(F"""Updating version to {version}.""" )
global_version_update(_A )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
a : Dict = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
a : Tuple = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
        post_release_work()
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
a : str = {
'''configuration_mask2former''': [
'''MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''Mask2FormerConfig''',
],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Union[str, Any] = ['''Mask2FormerImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : Any = [
'''MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Mask2FormerForUniversalSegmentation''',
'''Mask2FormerModel''',
'''Mask2FormerPreTrainedModel''',
]
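# _LazyModule (used at the bottom of this file) defers the heavy torch/vision imports
# until one of these attributes is first accessed, keeping the package import cheap.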
if TYPE_CHECKING:
from .configuration_maskaformer import MASK2FORMER_PRETRAINED_CONFIG_ARCHIVE_MAP, MaskaFormerConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .image_processing_maskaformer import MaskaFormerImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_maskaformer import (
MASK2FORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
MaskaFormerForUniversalSegmentation,
MaskaFormerModel,
MaskaFormerPreTrainedModel,
)
else:
import sys
    a : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
"""simple docstring"""
from __future__ import annotations
def _UpperCamelCase ( _A ) -> None:
"""simple docstring"""
create_state_space_tree(_A , [] , 0 , [0 for i in range(len(_A ) )] )
def _UpperCamelCase ( _A , _A , _A , _A , ) -> None:
"""simple docstring"""
if index == len(_A ):
print(_A )
return
for i in range(len(_A ) ):
if not index_used[i]:
current_sequence.append(sequence[i] )
_UpperCAmelCase = True
create_state_space_tree(_A , _A , index + 1 , _A )
current_sequence.pop()
_UpperCAmelCase = False
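# The index_used list acts as a visited set, so the state-space tree enumerates each
# permutation exactly once; a sequence of length n therefore prints n! lines.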
a : list[int | str] = [3, 1, 2, 4]
generate_all_permutations(sequence)
a : list[int | str] = ["A", "B", "C"]
generate_all_permutations(sequence_a)
"""simple docstring"""
import requests
a : List[str] = '''YOUR API KEY'''
def _UpperCamelCase ( _A , _A = giphy_api_key ) -> list:
"""simple docstring"""
_UpperCAmelCase = """+""".join(query.split() )
_UpperCAmelCase = F"""https://api.giphy.com/v1/gifs/search?q={formatted_query}&api_key={api_key}"""
_UpperCAmelCase = requests.get(_A ).json()["""data"""]
return [gif["url"] for gif in gifs]
if __name__ == "__main__":
    print('''\n'''.join(get_gifs('''space ship''')))
"""simple docstring"""
import inspect
import unittest
from transformers import DPTConfig
from transformers.file_utils import is_torch_available, is_vision_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, _config_zero_init, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import MODEL_MAPPING, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTModel
from transformers.models.dpt.modeling_dpt import DPT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import DPTImageProcessor
class a_ :
def __init__( self : Union[str, Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : Optional[int]=2 , __UpperCamelCase : int=32 , __UpperCamelCase : Tuple=16 , __UpperCamelCase : Dict=3 , __UpperCamelCase : Dict=True , __UpperCamelCase : Optional[Any]=True , __UpperCamelCase : Optional[Any]=32 , __UpperCamelCase : Any=4 , __UpperCamelCase : Optional[int]=[0, 1, 2, 3] , __UpperCamelCase : str=4 , __UpperCamelCase : Optional[Any]=37 , __UpperCamelCase : str="gelu" , __UpperCamelCase : Optional[int]=0.1 , __UpperCamelCase : Any=0.1 , __UpperCamelCase : Any=0.0_2 , __UpperCamelCase : Optional[int]=3 , __UpperCamelCase : int=[1, 3_84, 24, 24] , __UpperCamelCase : Union[str, Any]=True , __UpperCamelCase : Any=None , ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = parent
_UpperCAmelCase = batch_size
_UpperCAmelCase = image_size
_UpperCAmelCase = patch_size
_UpperCAmelCase = num_channels
_UpperCAmelCase = is_training
_UpperCAmelCase = use_labels
_UpperCAmelCase = hidden_size
_UpperCAmelCase = num_hidden_layers
_UpperCAmelCase = backbone_out_indices
_UpperCAmelCase = num_attention_heads
_UpperCAmelCase = intermediate_size
_UpperCAmelCase = hidden_act
_UpperCAmelCase = hidden_dropout_prob
_UpperCAmelCase = attention_probs_dropout_prob
_UpperCAmelCase = initializer_range
_UpperCAmelCase = num_labels
_UpperCAmelCase = backbone_featmap_shape
_UpperCAmelCase = scope
_UpperCAmelCase = is_hybrid
# sequence length of DPT = num_patches + 1 (we add 1 for the [CLS] token)
_UpperCAmelCase = (image_size // patch_size) ** 2
_UpperCAmelCase = num_patches + 1
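        # e.g. the default image_size=32 and patch_size=16 give (32 // 16) ** 2 = 4 patches,
        # hence a sequence length of 5 once the [CLS] token is counted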
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
_UpperCAmelCase = None
if self.use_labels:
_UpperCAmelCase = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
_UpperCAmelCase = self.get_config()
return config, pixel_values, labels
def _snake_case ( self : List[str] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = {
"""global_padding""": """same""",
"""layer_type""": """bottleneck""",
"""depths""": [3, 4, 9],
"""out_features""": ["""stage1""", """stage2""", """stage3"""],
"""embedding_dynamic_padding""": True,
"""hidden_sizes""": [96, 1_92, 3_84, 7_68],
"""num_groups""": 2,
}
return DPTConfig(
image_size=self.image_size , patch_size=self.patch_size , num_channels=self.num_channels , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , backbone_out_indices=self.backbone_out_indices , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , is_decoder=__UpperCamelCase , initializer_range=self.initializer_range , is_hybrid=self.is_hybrid , backbone_config=__UpperCamelCase , backbone_featmap_shape=self.backbone_featmap_shape , )
def _snake_case ( self : Any , __UpperCamelCase : Dict , __UpperCamelCase : Union[str, Any] , __UpperCamelCase : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = DPTModel(config=__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def _snake_case ( self : List[Any] , __UpperCamelCase : Optional[int] , __UpperCamelCase : str , __UpperCamelCase : List[Any] ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase )
self.parent.assertEqual(result.predicted_depth.shape , (self.batch_size, self.image_size, self.image_size) )
def _snake_case ( self : Any , __UpperCamelCase : List[Any] , __UpperCamelCase : Optional[Any] , __UpperCamelCase : Union[str, Any] ) ->List[Any]:
'''simple docstring'''
_UpperCAmelCase = self.num_labels
_UpperCAmelCase = DPTForSemanticSegmentation(__UpperCamelCase )
model.to(__UpperCamelCase )
model.eval()
_UpperCAmelCase = model(__UpperCamelCase , labels=__UpperCamelCase )
self.parent.assertEqual(
result.logits.shape , (self.batch_size, self.num_labels, self.image_size, self.image_size) )
def _snake_case ( self : Tuple ) ->Any:
'''simple docstring'''
_UpperCAmelCase = self.prepare_config_and_inputs()
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = config_and_inputs
_UpperCAmelCase = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class a_ ( _UpperCAmelCase , _UpperCAmelCase , unittest.TestCase ):
a : Dict = (DPTModel, DPTForDepthEstimation, DPTForSemanticSegmentation) if is_torch_available() else ()
a : int = (
{
'depth-estimation': DPTForDepthEstimation,
'feature-extraction': DPTModel,
'image-segmentation': DPTForSemanticSegmentation,
}
if is_torch_available()
else {}
)
a : str = False
a : List[str] = False
a : Dict = False
def _snake_case ( self : Any ) ->Any:
'''simple docstring'''
_UpperCAmelCase = DPTModelTester(self )
_UpperCAmelCase = ConfigTester(self , config_class=__UpperCamelCase , has_text_modality=__UpperCamelCase , hidden_size=37 )
def _snake_case ( self : Optional[int] ) ->Any:
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""DPT does not use inputs_embeds""" )
def _snake_case ( self : Tuple ) ->Tuple:
'''simple docstring'''
pass
def _snake_case ( self : int ) ->Any:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
self.assertIsInstance(model.get_input_embeddings() , (nn.Module) )
_UpperCAmelCase = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(__UpperCamelCase , nn.Linear ) )
def _snake_case ( self : Optional[int] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(__UpperCamelCase )
_UpperCAmelCase = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
_UpperCAmelCase = [*signature.parameters.keys()]
_UpperCAmelCase = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] , __UpperCamelCase )
def _snake_case ( self : Optional[Any] ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*__UpperCamelCase )
def _snake_case ( self : str ) ->int:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_depth_estimation(*__UpperCamelCase )
def _snake_case ( self : Any ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_semantic_segmentation(*__UpperCamelCase )
def _snake_case ( self : str ) ->Any:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ):
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : List[str] ) ->Dict:
'''simple docstring'''
for model_class in self.all_model_classes:
if model_class.__name__ == "DPTForDepthEstimation":
continue
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = False
_UpperCAmelCase = True
if model_class in get_values(__UpperCamelCase ) or not model_class.supports_gradient_checkpointing:
continue
_UpperCAmelCase = model_class(__UpperCamelCase )
model.to(__UpperCamelCase )
model.gradient_checkpointing_enable()
model.train()
_UpperCAmelCase = self._prepare_for_class(__UpperCamelCase , __UpperCamelCase , return_labels=__UpperCamelCase )
_UpperCAmelCase = model(**__UpperCamelCase ).loss
loss.backward()
def _snake_case ( self : Tuple ) ->List[str]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = _config_zero_init(__UpperCamelCase )
for model_class in self.all_model_classes:
_UpperCAmelCase = model_class(config=__UpperCamelCase )
# Skip the check for the backbone
_UpperCAmelCase = []
for name, module in model.named_modules():
if module.__class__.__name__ == "DPTViTHybridEmbeddings":
_UpperCAmelCase = [f"""{name}.{key}""" for key in module.state_dict().keys()]
break
for name, param in model.named_parameters():
if param.requires_grad:
if name in backbone_params:
continue
self.assertIn(
((param.data.mean() * 1e9).round() / 1e9).item() , [0.0, 1.0] , msg=f"""Parameter {name} of model {model_class} seems not properly initialized""" , )
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _snake_case ( self : Dict ) ->Tuple:
'''simple docstring'''
pass
@slow
def _snake_case ( self : Optional[int] ) ->List[Any]:
'''simple docstring'''
for model_name in DPT_PRETRAINED_MODEL_ARCHIVE_LIST[1:]:
_UpperCAmelCase = DPTModel.from_pretrained(__UpperCamelCase )
self.assertIsNotNone(__UpperCamelCase )
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase = self.model_tester.prepare_config_and_inputs_for_common()
_UpperCAmelCase = """add"""
with self.assertRaises(__UpperCamelCase ):
_UpperCAmelCase = DPTForDepthEstimation(__UpperCamelCase )
def _UpperCamelCase ( ) -> int:
"""simple docstring"""
_UpperCAmelCase = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
return image
@require_torch
@require_vision
@slow
class a_ ( unittest.TestCase ):
def _snake_case ( self : Any ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = DPTImageProcessor.from_pretrained("""Intel/dpt-hybrid-midas""" )
_UpperCAmelCase = DPTForDepthEstimation.from_pretrained("""Intel/dpt-hybrid-midas""" ).to(__UpperCamelCase )
_UpperCAmelCase = prepare_img()
_UpperCAmelCase = image_processor(images=__UpperCamelCase , return_tensors="""pt""" ).to(__UpperCamelCase )
# forward pass
with torch.no_grad():
_UpperCAmelCase = model(**__UpperCamelCase )
_UpperCAmelCase = outputs.predicted_depth
# verify the predicted depth
_UpperCAmelCase = torch.Size((1, 3_84, 3_84) )
self.assertEqual(predicted_depth.shape , __UpperCamelCase )
_UpperCAmelCase = torch.tensor(
[[[5.6_4_3_7, 5.6_1_4_6, 5.6_5_1_1], [5.4_3_7_1, 5.5_6_4_9, 5.5_9_5_8], [5.5_2_1_5, 5.5_1_8_4, 5.5_2_9_3]]] ).to(__UpperCamelCase )
        self.assertTrue(torch.allclose(outputs.predicted_depth[:3, :3, :3] / 1_00 , __UpperCamelCase , atol=1e-4 ) )
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
a : str = {
'''configuration_mega''': ['''MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegaConfig''', '''MegaOnnxConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
a : int = [
'''MEGA_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegaForCausalLM''',
'''MegaForMaskedLM''',
'''MegaForMultipleChoice''',
'''MegaForQuestionAnswering''',
'''MegaForSequenceClassification''',
'''MegaForTokenClassification''',
'''MegaModel''',
'''MegaPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_mega import MEGA_PRETRAINED_CONFIG_ARCHIVE_MAP, MegaConfig, MegaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mega import (
MEGA_PRETRAINED_MODEL_ARCHIVE_LIST,
MegaForCausalLM,
MegaForMaskedLM,
MegaForMultipleChoice,
MegaForQuestionAnswering,
MegaForSequenceClassification,
MegaForTokenClassification,
MegaModel,
MegaPreTrainedModel,
)
else:
import sys
    a : Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
"""simple docstring"""
import enum
import warnings
from ..tokenization_utils import TruncationStrategy
from ..utils import add_end_docstrings, is_tf_available, is_torch_available, logging
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_tf_available():
import tensorflow as tf
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
a : List[str] = logging.get_logger(__name__)
class a_ ( enum.Enum ):
a : Optional[Any] = 0
a : Dict = 1
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'generated'
def __init__( self : int , *__UpperCamelCase : Optional[int] , **__UpperCamelCase : str ) ->Any:
'''simple docstring'''
super().__init__(*__UpperCamelCase , **__UpperCamelCase )
self.check_model_type(
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING
if self.framework == """tf"""
else MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING )
def _snake_case ( self : Optional[int] , __UpperCamelCase : List[Any]=None , __UpperCamelCase : Union[str, Any]=None , __UpperCamelCase : Any=None , __UpperCamelCase : int=None , __UpperCamelCase : Tuple=None , __UpperCamelCase : Dict=None , **__UpperCamelCase : Any , ) ->Tuple:
'''simple docstring'''
_UpperCAmelCase = {}
if truncation is not None:
_UpperCAmelCase = truncation
_UpperCAmelCase = generate_kwargs
_UpperCAmelCase = {}
if return_tensors is not None and return_type is None:
_UpperCAmelCase = ReturnType.TENSORS if return_tensors else ReturnType.TEXT
if return_type is not None:
_UpperCAmelCase = return_type
if clean_up_tokenization_spaces is not None:
_UpperCAmelCase = clean_up_tokenization_spaces
if stop_sequence is not None:
_UpperCAmelCase = self.tokenizer.encode(__UpperCamelCase , add_special_tokens=__UpperCamelCase )
if len(__UpperCamelCase ) > 1:
warnings.warn(
"""Stopping on a multiple token sequence is not yet supported on transformers. The first token of"""
""" the stop sequence will be used as the stop sequence string in the interim.""" )
_UpperCAmelCase = stop_sequence_ids[0]
return preprocess_params, forward_params, postprocess_params
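        # The three dicts above are routed to preprocess(), _forward() and postprocess()
        # respectively; unrecognised keyword arguments land in generate_kwargs and are
        # forwarded verbatim to model.generate().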
def _snake_case ( self : int , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
return True
def _snake_case ( self : Optional[Any] , *__UpperCamelCase : Any , __UpperCamelCase : Dict ) ->Dict:
'''simple docstring'''
_UpperCAmelCase = self.model.config.prefix if self.model.config.prefix is not None else """"""
if isinstance(args[0] , __UpperCamelCase ):
if self.tokenizer.pad_token_id is None:
raise ValueError("""Please make sure that the tokenizer has a pad_token_id when using a batch input""" )
_UpperCAmelCase = ([prefix + arg for arg in args[0]],)
_UpperCAmelCase = True
elif isinstance(args[0] , __UpperCamelCase ):
_UpperCAmelCase = (prefix + args[0],)
_UpperCAmelCase = False
else:
            raise ValueError(
                f""" `args[0]`: {args[0]} has the wrong format. It should be either of type `str` or type `list`""" )
_UpperCAmelCase = self.tokenizer(*__UpperCamelCase , padding=__UpperCamelCase , truncation=__UpperCamelCase , return_tensors=self.framework )
        # This is produced by tokenizers but is an invalid generate kwarg
if "token_type_ids" in inputs:
del inputs["token_type_ids"]
return inputs
def __call__( self : Dict , *__UpperCamelCase : str , **__UpperCamelCase : Dict ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = super().__call__(*__UpperCamelCase , **__UpperCamelCase )
if (
isinstance(args[0] , __UpperCamelCase )
and all(isinstance(__UpperCamelCase , __UpperCamelCase ) for el in args[0] )
and all(len(__UpperCamelCase ) == 1 for res in result )
):
return [res[0] for res in result]
return result
def _snake_case ( self : List[str] , __UpperCamelCase : Any , __UpperCamelCase : str=TruncationStrategy.DO_NOT_TRUNCATE , **__UpperCamelCase : Optional[int] ) ->Optional[int]:
'''simple docstring'''
_UpperCAmelCase = self._parse_and_tokenize(__UpperCamelCase , truncation=__UpperCamelCase , **__UpperCamelCase )
return inputs
def _snake_case ( self : str , __UpperCamelCase : Dict , **__UpperCamelCase : Dict ) ->Tuple:
'''simple docstring'''
if self.framework == "pt":
_UpperCAmelCase ,_UpperCAmelCase = model_inputs["""input_ids"""].shape
elif self.framework == "tf":
_UpperCAmelCase ,_UpperCAmelCase = tf.shape(model_inputs["""input_ids"""] ).numpy()
_UpperCAmelCase = generate_kwargs.get("""min_length""" , self.model.config.min_length )
_UpperCAmelCase = generate_kwargs.get("""max_length""" , self.model.config.max_length )
self.check_inputs(__UpperCamelCase , generate_kwargs["""min_length"""] , generate_kwargs["""max_length"""] )
_UpperCAmelCase = self.model.generate(**__UpperCamelCase , **__UpperCamelCase )
_UpperCAmelCase = output_ids.shape[0]
if self.framework == "pt":
_UpperCAmelCase = output_ids.reshape(__UpperCamelCase , out_b // in_b , *output_ids.shape[1:] )
elif self.framework == "tf":
_UpperCAmelCase = tf.reshape(__UpperCamelCase , (in_b, out_b // in_b, *output_ids.shape[1:]) )
return {"output_ids": output_ids}
def _snake_case ( self : Tuple , __UpperCamelCase : str , __UpperCamelCase : Union[str, Any]=ReturnType.TEXT , __UpperCamelCase : int=False ) ->Any:
'''simple docstring'''
_UpperCAmelCase = []
for output_ids in model_outputs["output_ids"][0]:
if return_type == ReturnType.TENSORS:
_UpperCAmelCase = {f"""{self.return_name}_token_ids""": output_ids}
elif return_type == ReturnType.TEXT:
_UpperCAmelCase = {
f"""{self.return_name}_text""": self.tokenizer.decode(
__UpperCamelCase , skip_special_tokens=__UpperCamelCase , clean_up_tokenization_spaces=__UpperCamelCase , )
}
records.append(__UpperCamelCase )
return records
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : List[Any] = 'summary'
def __call__( self : Optional[Any] , *__UpperCamelCase : Optional[Any] , **__UpperCamelCase : Optional[int] ) ->Any:
'''simple docstring'''
return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
def _snake_case ( self : str , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->bool:
'''simple docstring'''
if max_length < min_length:
logger.warning(f"""Your min_length={min_length} must be inferior than your max_length={max_length}.""" )
if input_length < max_length:
logger.warning(
f"""Your max_length is set to {max_length}, but your input_length is only {input_length}. Since this is """
"""a summarization task, where outputs shorter than the input are typically wanted, you might """
f"""consider decreasing max_length manually, e.g. summarizer('...', max_length={input_length//2})""" )
@add_end_docstrings(_UpperCAmelCase )
class a_ ( _UpperCAmelCase ):
a : Optional[int] = 'translation'
def _snake_case ( self : Union[str, Any] , __UpperCamelCase : int , __UpperCamelCase : int , __UpperCamelCase : int ) ->Any:
'''simple docstring'''
if input_length > 0.9 * max_length:
logger.warning(
f"""Your input_length: {input_length} is bigger than 0.9 * max_length: {max_length}. You might consider """
"""increasing your max_length manually, e.g. translator('...', max_length=400)""" )
return True
def _snake_case ( self : Tuple , *__UpperCamelCase : List[str] , __UpperCamelCase : Tuple=TruncationStrategy.DO_NOT_TRUNCATE , __UpperCamelCase : Tuple=None , __UpperCamelCase : Union[str, Any]=None ) ->Tuple:
'''simple docstring'''
if getattr(self.tokenizer , """_build_translation_inputs""" , __UpperCamelCase ):
return self.tokenizer._build_translation_inputs(
*__UpperCamelCase , return_tensors=self.framework , truncation=__UpperCamelCase , src_lang=__UpperCamelCase , tgt_lang=__UpperCamelCase )
else:
return super()._parse_and_tokenize(*__UpperCamelCase , truncation=__UpperCamelCase )
def _snake_case ( self : List[str] , __UpperCamelCase : int=None , __UpperCamelCase : int=None , **__UpperCamelCase : Any ) ->int:
'''simple docstring'''
_UpperCAmelCase ,_UpperCAmelCase ,_UpperCAmelCase = super()._sanitize_parameters(**__UpperCamelCase )
if src_lang is not None:
_UpperCAmelCase = src_lang
if tgt_lang is not None:
_UpperCAmelCase = tgt_lang
if src_lang is None and tgt_lang is None:
            # Backward compatibility; direct argument use is preferred.
_UpperCAmelCase = kwargs.get("""task""" , self.task )
_UpperCAmelCase = task.split("""_""" )
if task and len(__UpperCamelCase ) == 4:
# translation, XX, to YY
_UpperCAmelCase = items[1]
_UpperCAmelCase = items[3]
return preprocess_params, forward_params, postprocess_params
def __call__( self : List[str] , *__UpperCamelCase : str , **__UpperCamelCase : Optional[Any] ) ->int:
'''simple docstring'''
        return super().__call__(*__UpperCamelCase , **__UpperCamelCase )
"""simple docstring"""
from __future__ import annotations
import random
# Maximum size of the population. A bigger population could be faster but is more memory-expensive.
a : Tuple = 2_0_0
# Number of elements selected in every generation of evolution. The selection takes
# place from best to worst of that generation and must be smaller than N_POPULATION.
a : Dict = 5_0
# Probability that an element of a generation can mutate, changing one of its genes.
# This will guarantee that all genes will be used during evolution.
a : Optional[int] = 0.4
# Just a seed to improve randomness required by the algorithm.
random.seed(random.randint(0, 1_0_0_0))
def _UpperCamelCase ( _A , _A ) -> tuple[str, float]:
"""simple docstring"""
_UpperCAmelCase = len([g for position, g in enumerate(_A ) if g == main_target[position]] )
return (item, float(_A ))
def _UpperCamelCase ( _A , _A ) -> tuple[str, str]:
"""simple docstring"""
_UpperCAmelCase = random.randint(0 , len(_A ) - 1 )
_UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
_UpperCAmelCase = parent_a[:random_slice] + parent_a[random_slice:]
return (child_a, child_a)
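# For example, crossing "abcdef" with "uvwxyz" at random_slice == 2 yields the two
# children "abwxyz" and "uvcdef".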
def _UpperCamelCase ( _A , _A ) -> str:
"""simple docstring"""
_UpperCAmelCase = list(_A )
if random.uniform(0 , 1 ) < MUTATION_PROBABILITY:
_UpperCAmelCase = random.choice(_A )
return "".join(_A )
def _UpperCamelCase ( _A , _A , _A , ) -> list[str]:
"""simple docstring"""
_UpperCAmelCase = []
# Generate more children proportionally to the fitness score.
_UpperCAmelCase = int(parent_a[1] * 1_0_0 ) + 1
_UpperCAmelCase = 1_0 if child_n >= 1_0 else child_n
    for _ in range(child_n ):
        _UpperCAmelCase = population_score[random.randint(0 , N_SELECTED )][0]
        _UpperCAmelCase ,_UpperCAmelCase = crossover(parent_a[0] , parent_b )
        # Append new string to the population list.
        pop.append(mutate(child_a , _A ) )
        pop.append(mutate(child_b , _A ) )
return pop
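# Worked example (illustrative): a parent with normalized score 0.23 yields
# int(0.23 * 100) + 1 = 24 crossover rounds, capped at 10; each round appends
# two mutated children, so this parent contributes 20 new strings.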
def _UpperCamelCase ( _A , _A , _A = True ) -> tuple[int, int, str]:
"""simple docstring"""
if N_POPULATION < N_SELECTED:
_UpperCAmelCase = F"""{N_POPULATION} must be bigger than {N_SELECTED}"""
raise ValueError(_A )
# Verify that the target contains no genes besides the ones inside genes variable.
_UpperCAmelCase = sorted({c for c in target if c not in genes} )
if not_in_genes_list:
_UpperCAmelCase = F"""{not_in_genes_list} is not in genes list, evolution cannot converge"""
raise ValueError(_A )
# Generate random starting population.
_UpperCAmelCase = []
    for _ in range(N_POPULATION ):
population.append("""""".join([random.choice(_A ) for i in range(len(_A ) )] ) )
# Just some logs to know what the algorithms is doing.
_UpperCAmelCase ,_UpperCAmelCase = 0, 0
# This loop will end when we find a perfect match for our target.
while True:
generation += 1
        total_population += len(population )
# Random population created. Now it's time to evaluate.
# Adding a bit of concurrency can make everything faster,
#
# import concurrent.futures
# population_score: list[tuple[str, float]] = []
# with concurrent.futures.ThreadPoolExecutor(
# max_workers=NUM_WORKERS) as executor:
# futures = {executor.submit(evaluate, item) for item in population}
# concurrent.futures.wait(futures)
# population_score = [item.result() for item in futures]
#
# but with a simple algorithm like this, it will probably be slower.
# We just need to call evaluate for every item inside the population.
        _UpperCAmelCase = [evaluate(item , _A ) for item in population]
# Check if there is a matching evolution.
        _UpperCAmelCase = sorted(population_score , key=lambda x : x[1] , reverse=True )
if population_score[0][0] == target:
return (generation, total_population, population_score[0][0])
# Print the best result every 10 generation.
# Just to know that the algorithm is working.
if debug and generation % 1_0 == 0:
print(
F"""\nGeneration: {generation}"""
F"""\nTotal Population:{total_population}"""
F"""\nBest score: {population_score[0][1]}"""
F"""\nBest string: {population_score[0][0]}""" )
# Flush the old population, keeping some of the best evolutions.
# Keeping this avoid regression of evolution.
_UpperCAmelCase = population[: int(N_POPULATION / 3 )]
population.clear()
        population.extend(population_best )
# Normalize population score to be between 0 and 1.
_UpperCAmelCase = [
(item, score / len(_A )) for item, score in population_score
]
# This is selection
        for i in range(N_SELECTED ):
            population.extend(select(population_score[int(i )] , _A , _A ) )
# Check if the population has already reached the maximum value and if so,
# break the cycle. If this check is disabled, the algorithm will take
# forever to compute large strings, but will also calculate small strings in
# a far fewer generations.
        if len(population ) > N_POPULATION:
break
if __name__ == "__main__":
a : str = (
'''This is a genetic algorithm to evaluate, combine, evolve, and mutate a string!'''
)
a : int = list(
''' ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklm'''
'''nopqrstuvwxyz.,;!?+-*#@^\'èéòà€ù=)(&%$£/\\'''
)
    a ,a ,a = basic(target_str, genes_list)
print(
F"\nGeneration: {generation}\nTotal Population: {population}\nTarget: {target}"
) | 710 |
"""simple docstring"""
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel
from diffusers import DDIMScheduler, LDMPipeline, UNetaDModel, VQModel
from diffusers.utils.testing_utils import enable_full_determinism, require_torch, slow, torch_device
enable_full_determinism()
class a_ ( unittest.TestCase ):
@property
def _snake_case ( self : Dict ) ->int:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = UNetaDModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=3 , out_channels=3 , down_block_types=("""DownBlock2D""", """AttnDownBlock2D""") , up_block_types=("""AttnUpBlock2D""", """UpBlock2D""") , )
return model
@property
def _snake_case ( self : Optional[Any] ) ->str:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = VQModel(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] , up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] , latent_channels=3 , )
return model
@property
def _snake_case ( self : List[Any] ) ->Optional[int]:
'''simple docstring'''
torch.manual_seed(0 )
_UpperCAmelCase = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1e-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=10_00 , )
        return CLIPTextModel(config )
def _snake_case ( self : List[str] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = self.dummy_uncond_unet
_UpperCAmelCase = DDIMScheduler()
_UpperCAmelCase = self.dummy_vq_model
        _UpperCAmelCase = LDMPipeline(unet=unet , vqvae=vae , scheduler=scheduler )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        _UpperCAmelCase = torch.manual_seed(0 )
        _UpperCAmelCase = ldm(generator=generator , num_inference_steps=2 , output_type="""numpy""" ).images
        _UpperCAmelCase = torch.manual_seed(0 )
        _UpperCAmelCase = ldm(generator=generator , num_inference_steps=2 , output_type="""numpy""" , return_dict=False )[0]
_UpperCAmelCase = image[0, -3:, -3:, -1]
_UpperCAmelCase = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
_UpperCAmelCase = np.array([0.8_5_1_2, 0.8_1_8, 0.6_4_1_1, 0.6_8_0_8, 0.4_4_6_5, 0.5_6_1_8, 0.4_6, 0.6_2_3_1, 0.5_1_7_2] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < tolerance
@slow
@require_torch
class a_ ( unittest.TestCase ):
def _snake_case ( self : List[Any] ) ->Optional[Any]:
'''simple docstring'''
_UpperCAmelCase = LDMPipeline.from_pretrained("""CompVis/ldm-celebahq-256""" )
        ldm.to(torch_device )
        ldm.set_progress_bar_config(disable=None )
        _UpperCAmelCase = torch.manual_seed(0 )
        _UpperCAmelCase = ldm(generator=generator , num_inference_steps=5 , output_type="""numpy""" ).images
_UpperCAmelCase = image[0, -3:, -3:, -1]
assert image.shape == (1, 2_56, 2_56, 3)
_UpperCAmelCase = np.array([0.4_3_9_9, 0.4_4_9_7_5, 0.4_6_8_2_5, 0.4_7_4, 0.4_3_5_9, 0.4_5_8_1, 0.4_5_0_9_5, 0.4_3_4_1, 0.4_4_4_7] )
_UpperCAmelCase = 1e-2 if torch_device != """mps""" else 3e-2
assert np.abs(image_slice.flatten() - expected_slice ).max() < tolerance | 19 | 0 |
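# These tests are normally driven by pytest; the file path below is an
# assumption for illustration:
#
#   python -m pytest tests/pipelines/latent_diffusion/test_latent_diffusion_uncond.py -q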