| code | code_codestyle | style_context | style_context_codestyle | label |
|---|---|---|---|---|
| string (87–55.2k chars) | int64 (0–349) | string (135–49.1k chars) | int64 (0–349) | int64 (0–1) |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import cached_download, hf_hub_url
from PIL import Image
from transformers import DPTConfig, DPTForDepthEstimation, DPTForSemanticSegmentation, DPTImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
lowerCAmelCase :Dict = logging.get_logger(__name__)
def lowerCamelCase ( lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : List[Any] = DPTConfig(embedding_type='hybrid' )
if "large" in checkpoint_url:
__magic_name__ : Dict = 1024
__magic_name__ : str = 4096
__magic_name__ : Optional[int] = 24
__magic_name__ : str = 16
__magic_name__ : str = [5, 11, 17, 23]
__magic_name__ : Optional[Any] = [256, 512, 1024, 1024]
__magic_name__ : Optional[Any] = (1, 384, 384)
if "nyu" or "midas" in checkpoint_url:
__magic_name__ : List[Any] = 768
__magic_name__ : List[str] = [1, 1, 1, 0.5]
__magic_name__ : List[str] = [256, 512, 768, 768]
__magic_name__ : Any = 150
__magic_name__ : Union[str, Any] = 16
__magic_name__ : List[Any] = (1, 384, 384)
__magic_name__ : Dict = False
__magic_name__ : Dict = 'project'
if "ade" in checkpoint_url:
__magic_name__ : int = True
__magic_name__ : List[str] = 768
__magic_name__ : Dict = [1, 1, 1, 0.5]
__magic_name__ : Tuple = 150
__magic_name__ : str = 16
__magic_name__ : Union[str, Any] = 'huggingface/label-files'
__magic_name__ : Union[str, Any] = 'ade20k-id2label.json'
__magic_name__ : str = json.load(open(cached_download(hf_hub_url(lowerCAmelCase , lowerCAmelCase , repo_type='dataset' ) ) , 'r' ) )
__magic_name__ : List[Any] = {int(k ): v for k, v in idalabel.items()}
__magic_name__ : List[str] = idalabel
__magic_name__ : List[Any] = {v: k for k, v in idalabel.items()}
__magic_name__ : Optional[int] = [1, 150, 480, 480]
return config, expected_shape
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : str = ['pretrained.model.head.weight', 'pretrained.model.head.bias']
for k in ignore_keys:
state_dict.pop(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if (
"pretrained.model" in name
and "cls_token" not in name
and "pos_embed" not in name
and "patch_embed" not in name
):
__magic_name__ : Optional[Any] = name.replace('pretrained.model' , 'dpt.encoder' )
if "pretrained.model" in name:
__magic_name__ : List[Any] = name.replace('pretrained.model' , 'dpt.embeddings' )
if "patch_embed" in name:
__magic_name__ : Any = name.replace('patch_embed' , '' )
if "pos_embed" in name:
__magic_name__ : Optional[int] = name.replace('pos_embed' , 'position_embeddings' )
if "attn.proj" in name:
__magic_name__ : Dict = name.replace('attn.proj' , 'attention.output.dense' )
if "proj" in name and "project" not in name:
__magic_name__ : Union[str, Any] = name.replace('proj' , 'projection' )
if "blocks" in name:
__magic_name__ : List[str] = name.replace('blocks' , 'layer' )
if "mlp.fc1" in name:
__magic_name__ : Any = name.replace('mlp.fc1' , 'intermediate.dense' )
if "mlp.fc2" in name:
__magic_name__ : Optional[int] = name.replace('mlp.fc2' , 'output.dense' )
if "norm1" in name and "backbone" not in name:
__magic_name__ : Optional[Any] = name.replace('norm1' , 'layernorm_before' )
if "norm2" in name and "backbone" not in name:
__magic_name__ : Union[str, Any] = name.replace('norm2' , 'layernorm_after' )
if "scratch.output_conv" in name:
__magic_name__ : List[str] = name.replace('scratch.output_conv' , 'head' )
if "scratch" in name:
__magic_name__ : Union[str, Any] = name.replace('scratch' , 'neck' )
if "layer1_rn" in name:
__magic_name__ : Dict = name.replace('layer1_rn' , 'convs.0' )
if "layer2_rn" in name:
__magic_name__ : Optional[int] = name.replace('layer2_rn' , 'convs.1' )
if "layer3_rn" in name:
__magic_name__ : List[str] = name.replace('layer3_rn' , 'convs.2' )
if "layer4_rn" in name:
__magic_name__ : Union[str, Any] = name.replace('layer4_rn' , 'convs.3' )
if "refinenet" in name:
__magic_name__ : Tuple = int(name[len('neck.refinenet' ) : len('neck.refinenet' ) + 1] )
# tricky here: we need to map 4 to 0, 3 to 1, 2 to 2 and 1 to 3
__magic_name__ : str = name.replace(f'refinenet{layer_idx}' , f'fusion_stage.layers.{abs(layer_idx-4 )}' )
if "out_conv" in name:
__magic_name__ : int = name.replace('out_conv' , 'projection' )
if "resConfUnit1" in name:
__magic_name__ : List[str] = name.replace('resConfUnit1' , 'residual_layer1' )
if "resConfUnit2" in name:
__magic_name__ : Dict = name.replace('resConfUnit2' , 'residual_layer2' )
if "conv1" in name:
__magic_name__ : Any = name.replace('conv1' , 'convolution1' )
if "conv2" in name:
__magic_name__ : Union[str, Any] = name.replace('conv2' , 'convolution2' )
# readout blocks
if "pretrained.act_postprocess1.0.project.0" in name:
__magic_name__ : Optional[int] = name.replace('pretrained.act_postprocess1.0.project.0' , 'neck.reassemble_stage.readout_projects.0.0' )
if "pretrained.act_postprocess2.0.project.0" in name:
__magic_name__ : List[Any] = name.replace('pretrained.act_postprocess2.0.project.0' , 'neck.reassemble_stage.readout_projects.1.0' )
if "pretrained.act_postprocess3.0.project.0" in name:
__magic_name__ : Union[str, Any] = name.replace('pretrained.act_postprocess3.0.project.0' , 'neck.reassemble_stage.readout_projects.2.0' )
if "pretrained.act_postprocess4.0.project.0" in name:
__magic_name__ : Dict = name.replace('pretrained.act_postprocess4.0.project.0' , 'neck.reassemble_stage.readout_projects.3.0' )
# resize blocks
if "pretrained.act_postprocess1.3" in name:
__magic_name__ : int = name.replace('pretrained.act_postprocess1.3' , 'neck.reassemble_stage.layers.0.projection' )
if "pretrained.act_postprocess1.4" in name:
__magic_name__ : str = name.replace('pretrained.act_postprocess1.4' , 'neck.reassemble_stage.layers.0.resize' )
if "pretrained.act_postprocess2.3" in name:
__magic_name__ : str = name.replace('pretrained.act_postprocess2.3' , 'neck.reassemble_stage.layers.1.projection' )
if "pretrained.act_postprocess2.4" in name:
__magic_name__ : Dict = name.replace('pretrained.act_postprocess2.4' , 'neck.reassemble_stage.layers.1.resize' )
if "pretrained.act_postprocess3.3" in name:
__magic_name__ : Optional[int] = name.replace('pretrained.act_postprocess3.3' , 'neck.reassemble_stage.layers.2.projection' )
if "pretrained.act_postprocess4.3" in name:
__magic_name__ : Union[str, Any] = name.replace('pretrained.act_postprocess4.3' , 'neck.reassemble_stage.layers.3.projection' )
if "pretrained.act_postprocess4.4" in name:
__magic_name__ : str = name.replace('pretrained.act_postprocess4.4' , 'neck.reassemble_stage.layers.3.resize' )
if "pretrained" in name:
__magic_name__ : Union[str, Any] = name.replace('pretrained' , 'dpt' )
if "bn" in name:
__magic_name__ : Dict = name.replace('bn' , 'batch_norm' )
if "head" in name:
__magic_name__ : Optional[Any] = name.replace('head' , 'head.head' )
if "encoder.norm" in name:
__magic_name__ : Dict = name.replace('encoder.norm' , 'layernorm' )
if "auxlayer" in name:
__magic_name__ : int = name.replace('auxlayer' , 'auxiliary_head.head' )
if "backbone" in name:
__magic_name__ : Tuple = name.replace('backbone' , 'backbone.bit.encoder' )
if ".." in name:
__magic_name__ : Optional[int] = name.replace('..' , '.' )
if "stem.conv" in name:
__magic_name__ : Dict = name.replace('stem.conv' , 'bit.embedder.convolution' )
if "blocks" in name:
__magic_name__ : Tuple = name.replace('blocks' , 'layers' )
if "convolution" in name and "backbone" in name:
__magic_name__ : Optional[int] = name.replace('convolution' , 'conv' )
if "layer" in name and "backbone" in name:
__magic_name__ : List[str] = name.replace('layer' , 'layers' )
if "backbone.bit.encoder.bit" in name:
__magic_name__ : Optional[int] = name.replace('backbone.bit.encoder.bit' , 'backbone.bit' )
if "embedder.conv" in name:
__magic_name__ : int = name.replace('embedder.conv' , 'embedder.convolution' )
if "backbone.bit.encoder.stem.norm" in name:
__magic_name__ : Optional[int] = name.replace('backbone.bit.encoder.stem.norm' , 'backbone.bit.embedder.norm' )
return name
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : int ):
"""simple docstring"""
for i in range(config.num_hidden_layers ):
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
__magic_name__ : int = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.weight' )
__magic_name__ : str = state_dict.pop(f'dpt.encoder.layer.{i}.attn.qkv.bias' )
# next, add query, keys and values (in that order) to the state dict
__magic_name__ : Optional[Any] = in_proj_weight[: config.hidden_size, :]
__magic_name__ : Any = in_proj_bias[: config.hidden_size]
__magic_name__ : List[Any] = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
__magic_name__ : Any = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
__magic_name__ : Optional[Any] = in_proj_weight[
-config.hidden_size :, :
]
__magic_name__ : List[str] = in_proj_bias[-config.hidden_size :]
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : List[Any] = 'http://images.cocodataset.org/val2017/000000039769.jpg'
__magic_name__ : str = Image.open(requests.get(lowerCAmelCase , stream=lowerCAmelCase ).raw )
return im
@torch.no_grad()
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : List[Any] , lowerCAmelCase : Any , lowerCAmelCase : Any , lowerCAmelCase : List[str] ):
"""simple docstring"""
__magic_name__ , __magic_name__ : Tuple = get_dpt_config(lowerCAmelCase )
# load original state_dict from URL
# state_dict = torch.hub.load_state_dict_from_url(checkpoint_url, map_location="cpu")
__magic_name__ : int = torch.load(lowerCAmelCase , map_location='cpu' )
# remove certain keys
remove_ignore_keys_(lowerCAmelCase )
# rename keys
for key in state_dict.copy().keys():
__magic_name__ : Union[str, Any] = state_dict.pop(lowerCAmelCase )
__magic_name__ : Tuple = val
# read in qkv matrices
read_in_q_k_v(lowerCAmelCase , lowerCAmelCase )
# load HuggingFace model
__magic_name__ : str = DPTForSemanticSegmentation(lowerCAmelCase ) if 'ade' in checkpoint_url else DPTForDepthEstimation(lowerCAmelCase )
model.load_state_dict(lowerCAmelCase )
model.eval()
# Check outputs on an image
__magic_name__ : Tuple = 480 if 'ade' in checkpoint_url else 384
__magic_name__ : Union[str, Any] = DPTImageProcessor(size=lowerCAmelCase )
__magic_name__ : int = prepare_img()
__magic_name__ : Optional[Any] = image_processor(lowerCAmelCase , return_tensors='pt' )
# forward pass
__magic_name__ : Optional[Any] = model(**lowerCAmelCase ).logits if 'ade' in checkpoint_url else model(**lowerCAmelCase ).predicted_depth
if show_prediction:
__magic_name__ : str = (
torch.nn.functional.interpolate(
outputs.unsqueeze(1 ) , size=(image.size[1], image.size[0]) , mode='bicubic' , align_corners=lowerCAmelCase , )
.squeeze()
.cpu()
.numpy()
)
Image.fromarray((prediction / prediction.max()) * 255 ).show()
if pytorch_dump_folder_path is not None:
Path(lowerCAmelCase ).mkdir(exist_ok=lowerCAmelCase )
print(f'Saving model to {pytorch_dump_folder_path}' )
model.save_pretrained(lowerCAmelCase )
print(f'Saving image processor to {pytorch_dump_folder_path}' )
image_processor.save_pretrained(lowerCAmelCase )
if push_to_hub:
model.push_to_hub('ybelkada/dpt-hybrid-midas' )
image_processor.push_to_hub('ybelkada/dpt-hybrid-midas' )
if __name__ == "__main__":
lowerCAmelCase :Any = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--checkpoint_url''',
default='''https://github.com/intel-isl/DPT/releases/download/1_0/dpt_large-midas-2f21e586.pt''',
type=str,
help='''URL of the original DPT checkpoint you\'d like to convert.''',
)
parser.add_argument(
'''--pytorch_dump_folder_path''',
default=None,
type=str,
required=False,
help='''Path to the output PyTorch model directory.''',
)
parser.add_argument(
'''--push_to_hub''',
action='''store_true''',
)
parser.add_argument(
'''--model_name''',
default='''dpt-large''',
type=str,
help='''Name of the model, in case you\'re pushing to the hub.''',
)
parser.add_argument(
'''--show_prediction''',
action='''store_true''',
)
lowerCAmelCase :Tuple = parser.parse_args()
convert_dpt_checkpoint(
args.checkpoint_url, args.pytorch_dump_folder_path, args.push_to_hub, args.model_name, args.show_prediction
)
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase ( lowerCAmelCase : int = 200_0000 ):
"""simple docstring"""
__magic_name__ : list[int] = [0]
__magic_name__ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# we want this to be as close as possible to target
__magic_name__ : int = 0
# the area corresponding to the grid that gives the product closest to target
__magic_name__ : int = 0
# an estimate of b, using the quadratic formula
__magic_name__ : float
# the largest integer less than b_estimate
__magic_name__ : int
# the smallest integer greater than or equal to b_estimate
__magic_name__ : int
# the triangle number corresponding to b_floor
__magic_name__ : int
# the triangle number corresponding to b_ceil
__magic_name__ : int
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
__magic_name__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__magic_name__ : List[Any] = floor(lowerCAmelCase )
__magic_name__ : Dict = ceil(lowerCAmelCase )
__magic_name__ : Any = triangle_numbers[b_floor]
__magic_name__ : Optional[int] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : Any = triangle_b_first_guess * triangle_a
__magic_name__ : Any = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : List[str] = triangle_b_second_guess * triangle_a
__magic_name__ : Optional[int] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'{solution() = }')
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase :Union[str, Any] = {
'''configuration_clip''': [
'''CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''CLIPConfig''',
'''CLIPOnnxConfig''',
'''CLIPTextConfig''',
'''CLIPVisionConfig''',
],
'''processing_clip''': ['''CLIPProcessor'''],
'''tokenization_clip''': ['''CLIPTokenizer'''],
}
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = ['''CLIPTokenizerFast''']
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = ['''CLIPFeatureExtractor''']
lowerCAmelCase :int = ['''CLIPImageProcessor''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = [
'''CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''CLIPModel''',
'''CLIPPreTrainedModel''',
'''CLIPTextModel''',
'''CLIPTextModelWithProjection''',
'''CLIPVisionModel''',
'''CLIPVisionModelWithProjection''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Dict = [
'''TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFCLIPModel''',
'''TFCLIPPreTrainedModel''',
'''TFCLIPTextModel''',
'''TFCLIPVisionModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = [
'''FlaxCLIPModel''',
'''FlaxCLIPPreTrainedModel''',
'''FlaxCLIPTextModel''',
'''FlaxCLIPTextPreTrainedModel''',
'''FlaxCLIPVisionModel''',
'''FlaxCLIPVisionPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_clip import (
CLIP_PRETRAINED_CONFIG_ARCHIVE_MAP,
CLIPConfig,
CLIPOnnxConfig,
CLIPTextConfig,
CLIPVisionConfig,
)
from .processing_clip import CLIPProcessor
from .tokenization_clip import CLIPTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_clip_fast import CLIPTokenizerFast
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_clip import CLIPFeatureExtractor
from .image_processing_clip import CLIPImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_clip import (
CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
CLIPModel,
CLIPPreTrainedModel,
CLIPTextModel,
CLIPTextModelWithProjection,
CLIPVisionModel,
CLIPVisionModelWithProjection,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_clip import (
TF_CLIP_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCLIPModel,
TFCLIPPreTrainedModel,
TFCLIPTextModel,
TFCLIPVisionModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_clip import (
FlaxCLIPModel,
FlaxCLIPPreTrainedModel,
FlaxCLIPTextModel,
FlaxCLIPTextPreTrainedModel,
FlaxCLIPVisionModel,
FlaxCLIPVisionPreTrainedModel,
)
else:
import sys
lowerCAmelCase :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure)
'''simple docstring'''
from collections import defaultdict
from math import ceil, sqrt
def lowerCamelCase ( lowerCAmelCase : int = 100_0000 , lowerCAmelCase : int = 10 ):
"""simple docstring"""
__magic_name__ : defaultdict = defaultdict(lowerCAmelCase )
for outer_width in range(3 , (t_limit // 4) + 2 ):
if outer_width * outer_width > t_limit:
__magic_name__ : Dict = max(
ceil(sqrt(outer_width * outer_width - t_limit ) ) , 1 )
else:
__magic_name__ : Union[str, Any] = 1
hole_width_lower_bound += (outer_width - hole_width_lower_bound) % 2
for hole_width in range(lowerCAmelCase , outer_width - 1 , 2 ):
count[outer_width * outer_width - hole_width * hole_width] += 1
return sum(1 for n in count.values() if 1 <= n <= 10 )
if __name__ == "__main__":
print(F'{solution() = }')
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = ["""pixel_values"""]
def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None:
super().__init__(**_A )
__magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384}
__magic_name__ : Dict = get_size_dict(_A , default_to_square=_A )
__magic_name__ : List[Any] = do_resize
__magic_name__ : str = size
# Default value set here for backwards compatibility where the value in config is None
__magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256
__magic_name__ : int = resample
__magic_name__ : List[str] = do_rescale
__magic_name__ : List[Any] = rescale_factor
__magic_name__ : str = do_normalize
__magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
__magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
__magic_name__ : Dict = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__magic_name__ : Dict = int(shortest_edge / crop_pct )
__magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
__magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int:
return rescale(_A , scale=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image:
__magic_name__ : int = do_resize if do_resize is not None else self.do_resize
__magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
__magic_name__ : Optional[Any] = resample if resample is not None else self.resample
__magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ : str = image_mean if image_mean is not None else self.image_mean
__magic_name__ : Dict = image_std if image_std is not None else self.image_std
__magic_name__ : Dict = size if size is not None else self.size
__magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A )
__magic_name__ : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
__magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images]
if do_rescale:
__magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
__magic_name__ : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A )
'''simple docstring'''
import os
from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import torch
from torch import nn
from ...models.controlnet import ControlNetModel, ControlNetOutput
from ...models.modeling_utils import ModelMixin
from ...utils import logging
lowerCAmelCase :Tuple = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
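# Runs several ControlNetModel instances over the same latents and sums their
# down-block and mid-block residuals (a multi-ControlNet wrapper).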
def __init__( self : List[Any] , _A : Union[List[ControlNetModel], Tuple[ControlNetModel]] ) -> Union[str, Any]:
super().__init__()
__magic_name__ : int = nn.ModuleList(_A )
def __lowerCAmelCase ( self : Tuple , _A : torch.FloatTensor , _A : Union[torch.Tensor, float, int] , _A : torch.Tensor , _A : List[torch.tensor] , _A : List[float] , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[torch.Tensor] = None , _A : Optional[Dict[str, Any]] = None , _A : bool = False , _A : bool = True , ) -> Union[ControlNetOutput, Tuple]:
for i, (image, scale, controlnet) in enumerate(zip(_A , _A , self.nets ) ):
__magic_name__ , __magic_name__ : Optional[Any] = controlnet(
_A , _A , _A , _A , _A , _A , _A , _A , _A , _A , _A , )
# merge samples
if i == 0:
__magic_name__ , __magic_name__ : Optional[Any] = down_samples, mid_sample
else:
__magic_name__ : Union[str, Any] = [
samples_prev + samples_curr
for samples_prev, samples_curr in zip(_A , _A )
]
mid_block_res_sample += mid_sample
return down_block_res_samples, mid_block_res_sample
def __lowerCAmelCase ( self : Any , _A : Union[str, os.PathLike] , _A : bool = True , _A : Callable = None , _A : bool = False , _A : Optional[str] = None , ) -> List[Any]:
__magic_name__ : Dict = 0
__magic_name__ : Dict = save_directory
for controlnet in self.nets:
controlnet.save_pretrained(
_A , is_main_process=_A , save_function=_A , safe_serialization=_A , variant=_A , )
idx += 1
__magic_name__ : Optional[int] = model_path_to_save + F'_{idx}'
@classmethod
def __lowerCAmelCase ( cls : int , _A : Optional[Union[str, os.PathLike]] , **_A : Tuple ) -> int:
__magic_name__ : Union[str, Any] = 0
__magic_name__ : int = []
# load controlnet and append to list until no controlnet directory exists anymore
# first controlnet has to be saved under `./mydirectory/controlnet` to be compliant with `DiffusionPipeline.from_pretrained`
# second, third, ... controlnets have to be saved under `./mydirectory/controlnet_1`, `./mydirectory/controlnet_2`, ...
__magic_name__ : List[str] = pretrained_model_path
while os.path.isdir(_A ):
__magic_name__ : Optional[Any] = ControlNetModel.from_pretrained(_A , **_A )
controlnets.append(_A )
idx += 1
__magic_name__ : int = pretrained_model_path + F'_{idx}'
logger.info(F'{len(_A )} controlnets loaded from {pretrained_model_path}.' )
if len(_A ) == 0:
raise ValueError(
F'No ControlNets found under {os.path.dirname(_A )}. Expected at least {pretrained_model_path + "_0"}.' )
return cls(_A )
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
lowerCAmelCase :Tuple = 1.0_5_4_5_7_1_8_1_7E-3_4 # unit of ℏ : J * s
lowerCAmelCase :Union[str, Any] = 3E8 # unit of c : m * s^-1
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
__magic_name__ : Any = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
__magic_name__ : Optional[int] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
__magic_name__ : Union[str, Any] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import functools
def lowerCamelCase ( lowerCAmelCase : list[int] , lowerCAmelCase : list[int] ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ) or not all(isinstance(lowerCAmelCase , lowerCAmelCase ) for day in days ):
raise ValueError('The parameter days should be a list of integers' )
if len(lowerCAmelCase ) != 3 or not all(isinstance(lowerCAmelCase , lowerCAmelCase ) for cost in costs ):
raise ValueError('The parameter costs should be a list of three integers' )
if len(lowerCAmelCase ) == 0:
return 0
if min(lowerCAmelCase ) <= 0:
raise ValueError('All days elements should be greater than 0' )
if max(lowerCAmelCase ) >= 366:
raise ValueError('All days elements should be less than 366' )
__magic_name__ : Any = set(lowerCAmelCase )
@functools.cache
def dynamic_programming(lowerCAmelCase : int ) -> int:
if index > 365:
return 0
if index not in days_set:
return dynamic_programming(index + 1 )
return min(
costs[0] + dynamic_programming(index + 1 ) , costs[1] + dynamic_programming(index + 7 ) , costs[2] + dynamic_programming(index + 30 ) , )
return dynamic_programming(1 )
if __name__ == "__main__":
import doctest
doctest.testmod()
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowerCAmelCase :Tuple = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowerCAmelCase :List[Any] = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowerCAmelCase :str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowerCAmelCase :str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowerCAmelCase :Optional[Any] = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase :Union[str, Any] = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowerCAmelCase :Tuple = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) )
__magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
__magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
def lowerCamelCase ( lowerCAmelCase : int = 100 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : Any = PokerHand(lowerCAmelCase )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : str ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Dict , lowerCAmelCase : Dict ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Tuple ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : Optional[Any] , lowerCAmelCase : Any ):
"""simple docstring"""
assert PokerHand(lowerCAmelCase ).compare_with(PokerHand(lowerCAmelCase ) ) == expected
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS]
__magic_name__ : Tuple = poker_hands.copy()
shuffle(lowerCAmelCase )
__magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) )
for index, hand in enumerate(lowerCAmelCase ):
assert hand == poker_hands[index]
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' )
__magic_name__ : Optional[Any] = True
__magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = 0
__magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) )
__magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' )
with open(lowerCAmelCase ) as file_hand:
for line in file_hand:
__magic_name__ : Optional[int] = line[:14].strip()
__magic_name__ : List[Any] = line[15:].strip()
__magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase )
__magic_name__ : List[Any] = player.compare_with(lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 376
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : Dict ):
"""simple docstring"""
__magic_name__ : int = len(lowerCAmelCase )
while cur > 1:
# Find the maximum number in arr
__magic_name__ : int = arr.index(max(arr[0:cur] ) )
# Reverse from 0 to mi
__magic_name__ : Union[str, Any] = arr[mi::-1] + arr[mi + 1 : len(lowerCAmelCase )]
# Reverse whole list
__magic_name__ : List[str] = arr[cur - 1 :: -1] + arr[cur : len(lowerCAmelCase )]
cur -= 1
return arr
if __name__ == "__main__":
lowerCAmelCase :List[str] = input('''Enter numbers separated by a comma:\n''').strip()
lowerCAmelCase :List[Any] = [int(item) for item in user_input.split(''',''')]
print(pancake_sort(unsorted))
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase :Union[str, Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase :Optional[Any] = {
'''configuration_wav2vec2''': ['''WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''Wav2Vec2Config'''],
'''feature_extraction_wav2vec2''': ['''Wav2Vec2FeatureExtractor'''],
'''processing_wav2vec2''': ['''Wav2Vec2Processor'''],
'''tokenization_wav2vec2''': ['''Wav2Vec2CTCTokenizer''', '''Wav2Vec2Tokenizer'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'''WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''Wav2Vec2ForAudioFrameClassification''',
'''Wav2Vec2ForCTC''',
'''Wav2Vec2ForMaskedLM''',
'''Wav2Vec2ForPreTraining''',
'''Wav2Vec2ForSequenceClassification''',
'''Wav2Vec2ForXVector''',
'''Wav2Vec2Model''',
'''Wav2Vec2PreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :List[Any] = [
'''TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFWav2Vec2ForCTC''',
'''TFWav2Vec2Model''',
'''TFWav2Vec2PreTrainedModel''',
'''TFWav2Vec2ForSequenceClassification''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :List[Any] = [
'''FlaxWav2Vec2ForCTC''',
'''FlaxWav2Vec2ForPreTraining''',
'''FlaxWav2Vec2Model''',
'''FlaxWav2Vec2PreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_wavaveca import WAV_2_VEC_2_PRETRAINED_CONFIG_ARCHIVE_MAP, WavaVecaConfig
from .feature_extraction_wavaveca import WavaVecaFeatureExtractor
from .processing_wavaveca import WavaVecaProcessor
from .tokenization_wavaveca import WavaVecaCTCTokenizer, WavaVecaTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_wavaveca import (
WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
WavaVecaForAudioFrameClassification,
WavaVecaForCTC,
WavaVecaForMaskedLM,
WavaVecaForPreTraining,
WavaVecaForSequenceClassification,
WavaVecaForXVector,
WavaVecaModel,
WavaVecaPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_wavaveca import (
TF_WAV_2_VEC_2_PRETRAINED_MODEL_ARCHIVE_LIST,
TFWavaVecaForCTC,
TFWavaVecaForSequenceClassification,
TFWavaVecaModel,
TFWavaVecaPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_wavaveca import (
FlaxWavaVecaForCTC,
FlaxWavaVecaForPreTraining,
FlaxWavaVecaModel,
FlaxWavaVecaPreTrainedModel,
)
else:
import sys
lowerCAmelCase :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
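# Zero-shot image classification: score one image against arbitrary candidate
# labels using a CLIP-style model and a configurable hypothesis template.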
def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple:
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]:
__magic_name__ : str = {}
if "candidate_labels" in kwargs:
__magic_name__ : str = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__magic_name__ : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int:
__magic_name__ : Dict = load_image(_A )
__magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
__magic_name__ : Optional[Any] = candidate_labels
__magic_name__ : List[Any] = [hypothesis_template.format(x ) for x in candidate_labels]
__magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
__magic_name__ : Optional[Any] = [text_inputs]
return inputs
def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str:
__magic_name__ : str = model_inputs.pop('candidate_labels' )
__magic_name__ : str = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , _A ):
__magic_name__ : Dict = text_inputs[0]
else:
# Batching case.
__magic_name__ : Optional[Any] = text_inputs[0][0]
__magic_name__ : List[Any] = self.model(**_A , **_A )
__magic_name__ : str = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]:
__magic_name__ : Tuple = model_outputs.pop('candidate_labels' )
__magic_name__ : Union[str, Any] = model_outputs['logits'][0]
if self.framework == "pt":
__magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
__magic_name__ : Tuple = probs.tolist()
if not isinstance(_A , _A ):
__magic_name__ : Any = [scores]
elif self.framework == "tf":
__magic_name__ : Any = stable_softmax(_A , axis=-1 )
__magic_name__ : Dict = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__magic_name__ : Union[str, Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -_A[0] )
]
return result
'''simple docstring'''
import os
from pickle import UnpicklingError
from typing import Dict, Tuple
import jax
import jax.numpy as jnp
import numpy as np
from flax.serialization import from_bytes
from flax.traverse_util import flatten_dict, unflatten_dict
import transformers
from .utils import logging
lowerCAmelCase :Dict = logging.get_logger(__name__)
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : List[str] , lowerCAmelCase : Dict , lowerCAmelCase : Dict=False ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a PyTorch model in Flax requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
if not is_sharded:
__magic_name__ : List[Any] = os.path.abspath(lowerCAmelCase )
logger.info(f'Loading PyTorch weights from {pt_path}' )
__magic_name__ : Dict = torch.load(lowerCAmelCase , map_location='cpu' )
logger.info(f'PyTorch checkpoint contains {sum(t.numel() for t in pt_state_dict.values() ):,} parameters.' )
__magic_name__ : List[str] = convert_pytorch_state_dict_to_flax(lowerCAmelCase , lowerCAmelCase )
else:
# model is sharded and pytorch_checkpoint_path already contains the list of .pt shard files
__magic_name__ : List[Any] = convert_pytorch_sharded_state_dict_to_flax(lowerCAmelCase , lowerCAmelCase )
return flax_state_dict
def lowerCamelCase ( lowerCAmelCase : Tuple[str] , lowerCAmelCase : np.ndarray , lowerCAmelCase : Dict[str, jnp.ndarray] , lowerCAmelCase : str , ):
"""simple docstring"""
def is_key_or_prefix_key_in_dict(lowerCAmelCase : Tuple[str] ) -> bool:
return len(set(lowerCAmelCase ) & {key, (model_prefix,) + key} ) > 0
# layer norm
__magic_name__ : List[str] = pt_tuple_key[:-1] + ('scale',)
if pt_tuple_key[-1] in ["weight", "gamma"] and is_key_or_prefix_key_in_dict(lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer mean
__magic_name__ : List[str] = pt_tuple_key[:-1] + ('mean',)
if pt_tuple_key[-1] == "running_mean" and not is_key_or_prefix_key_in_dict(lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# batch norm layer var
__magic_name__ : List[str] = pt_tuple_key[:-1] + ('var',)
if pt_tuple_key[-1] == "running_var" and not is_key_or_prefix_key_in_dict(lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# embedding
__magic_name__ : Optional[int] = pt_tuple_key[:-1] + ('embedding',)
if pt_tuple_key[-1] == "weight" and is_key_or_prefix_key_in_dict(lowerCAmelCase ):
return renamed_pt_tuple_key, pt_tensor
# conv layer
__magic_name__ : int = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4 and not is_key_or_prefix_key_in_dict(lowerCAmelCase ):
__magic_name__ : Optional[int] = pt_tensor.transpose(2 , 3 , 1 , 0 )
return renamed_pt_tuple_key, pt_tensor
# linear layer
__magic_name__ : List[Any] = pt_tuple_key[:-1] + ('kernel',)
if pt_tuple_key[-1] == "weight" and not is_key_or_prefix_key_in_dict(lowerCAmelCase ):
__magic_name__ : Dict = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
__magic_name__ : Any = pt_tuple_key[:-1] + ('weight',)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
__magic_name__ : Union[str, Any] = pt_tuple_key[:-1] + ('bias',)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
__magic_name__ : Dict = None
if pt_tuple_key[-3::2] == ("parametrizations", "original0"):
__magic_name__ : str = pt_tuple_key[-2] + '_g'
elif pt_tuple_key[-3::2] == ("parametrizations", "original1"):
__magic_name__ : int = pt_tuple_key[-2] + '_v'
if name is not None:
__magic_name__ : str = pt_tuple_key[:-3] + (name,)
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = {k: v.numpy() for k, v in pt_state_dict.items()}
__magic_name__ : List[Any] = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers
if "params" in flax_model.params:
__magic_name__ : Union[str, Any] = flax_model.params['params']
else:
__magic_name__ : Optional[int] = flax_model.params
__magic_name__ : Union[str, Any] = flatten_dict(lowerCAmelCase )
# add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__magic_name__ : Dict = flatten_dict(flax_model.params['batch_stats'] )
random_flax_state_dict.update(lowerCAmelCase )
__magic_name__ : str = {}
__magic_name__ : Tuple = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__magic_name__ : List[Any] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__magic_name__ : Any = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__magic_name__ : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__magic_name__ : Dict = pt_tuple_key[1:]
# Correctly rename weight parameters
__magic_name__ , __magic_name__ : List[str] = rename_key_and_reshape_tensor(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# add model prefix if necessary
__magic_name__ : Union[str, Any] = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__magic_name__ : Any = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1] or "var" in flax_key[-1]:
__magic_name__ : int = jnp.asarray(lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase , lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__magic_name__ : str = jnp.asarray(lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__magic_name__ : Optional[Any] = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
import torch
# Load the index
__magic_name__ : Tuple = {}
for shard_file in shard_filenames:
# load using msgpack utils
__magic_name__ : str = torch.load(lowerCAmelCase )
__magic_name__ : str = {k: v.numpy() for k, v in pt_state_dict.items()}
__magic_name__ : Tuple = flax_model.base_model_prefix
# use params dict if the model contains batch norm layers and then add batch_stats keys,values to dict
if "batch_stats" in flax_model.params:
__magic_name__ : int = flax_model.params['params']
__magic_name__ : Union[str, Any] = flatten_dict(lowerCAmelCase )
random_flax_state_dict.update(flatten_dict(flax_model.params['batch_stats'] ) )
else:
__magic_name__ : int = flax_model.params
__magic_name__ : List[Any] = flatten_dict(lowerCAmelCase )
__magic_name__ : Dict = (model_prefix not in flax_model_params) and (
model_prefix in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
__magic_name__ : List[str] = (model_prefix in flax_model_params) and (
model_prefix not in {k.split('.' )[0] for k in pt_state_dict.keys()}
)
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
__magic_name__ : List[Any] = tuple(pt_key.split('.' ) )
# remove base model prefix if necessary
__magic_name__ : Union[str, Any] = pt_tuple_key[0] == model_prefix
if load_model_with_head_into_base_model and has_base_model_prefix:
__magic_name__ : Dict = pt_tuple_key[1:]
# Correctly rename weight parameters
__magic_name__ , __magic_name__ : Tuple = rename_key_and_reshape_tensor(
lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase )
# add model prefix if necessary
__magic_name__ : Dict = (model_prefix,) + flax_key in random_flax_state_dict
if load_base_model_into_model_with_head and require_base_model_prefix:
__magic_name__ : Union[str, Any] = (model_prefix,) + flax_key
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
f'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
f'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
# add batch stats if the model contains batchnorm layers
if "batch_stats" in flax_model.params:
if "mean" in flax_key[-1]:
__magic_name__ : int = jnp.asarray(lowerCAmelCase )
continue
if "var" in flax_key[-1]:
__magic_name__ : Optional[Any] = jnp.asarray(lowerCAmelCase )
continue
# remove num_batches_tracked key
if "num_batches_tracked" in flax_key[-1]:
flax_state_dict.pop(lowerCAmelCase , lowerCAmelCase )
continue
# also add unexpected weight so that warning is thrown
__magic_name__ : Optional[Any] = jnp.asarray(lowerCAmelCase )
else:
# also add unexpected weight so that warning is thrown
__magic_name__ : List[str] = jnp.asarray(lowerCAmelCase )
return unflatten_dict(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = os.path.abspath(lowerCAmelCase )
logger.info(f'Loading Flax weights from {flax_checkpoint_path}' )
# import correct flax class
__magic_name__ : str = getattr(lowerCAmelCase , 'Flax' + model.__class__.__name__ )
# load flax weight dict
with open(lowerCAmelCase , 'rb' ) as state_f:
try:
__magic_name__ : Tuple = from_bytes(lowerCAmelCase , state_f.read() )
except UnpicklingError:
raise EnvironmentError(f'Unable to convert {flax_checkpoint_path} to Flax deserializable object. ' )
return load_flax_weights_in_pytorch_model(lowerCAmelCase , lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
try:
import torch # noqa: F401
except ImportError:
logger.error(
'Loading a Flax weights in PyTorch, requires both PyTorch and Flax to be installed. Please see'
' https://pytorch.org/ and https://flax.readthedocs.io/en/latest/installation.html for installation'
' instructions.' )
raise
# check if we have bf16 weights
    __magic_name__ : Tuple = flatten_dict(jax.tree_util.tree_map(lambda x : x.dtype == jnp.bfloataa , lowerCAmelCase ) ).values()
if any(lowerCAmelCase ):
        # convert all weights to fp32 if they are bf16, since torch.from_numpy cannot handle bf16
# and bf16 is not fully supported in PT yet.
logger.warning(
'Found ``bfloat16`` weights in Flax model. Casting all ``bfloat16`` weights to ``float32`` '
'before loading those in PyTorch model.' )
__magic_name__ : Dict = jax.tree_util.tree_map(
            lambda params : params.astype(np.floataa ) if params.dtype == jnp.bfloataa else params , lowerCAmelCase )
__magic_name__ : Union[str, Any] = flatten_dict(lowerCAmelCase )
__magic_name__ : int = pt_model.state_dict()
__magic_name__ : Union[str, Any] = (pt_model.base_model_prefix in flax_state) and (
pt_model.base_model_prefix not in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
__magic_name__ : Dict = (pt_model.base_model_prefix not in flax_state) and (
pt_model.base_model_prefix in {k.split('.' )[0] for k in pt_model_dict.keys()}
)
# keep track of unexpected & missing keys
__magic_name__ : str = []
__magic_name__ : Optional[Any] = set(pt_model_dict.keys() )
for flax_key_tuple, flax_tensor in flax_state_dict.items():
__magic_name__ : Union[str, Any] = flax_key_tuple[0] == pt_model.base_model_prefix
__magic_name__ : Optional[Any] = '.'.join((pt_model.base_model_prefix,) + flax_key_tuple ) in pt_model_dict
# adapt flax_key to prepare for loading from/to base model only
if load_model_with_head_into_base_model and has_base_model_prefix:
__magic_name__ : Union[str, Any] = flax_key_tuple[1:]
elif load_base_model_into_model_with_head and require_base_model_prefix:
__magic_name__ : Any = (pt_model.base_model_prefix,) + flax_key_tuple
# rename flax weights to PyTorch format
if flax_key_tuple[-1] == "kernel" and flax_tensor.ndim == 4 and ".".join(lowerCAmelCase ) not in pt_model_dict:
# conv layer
__magic_name__ : str = flax_key_tuple[:-1] + ('weight',)
__magic_name__ : List[str] = jnp.transpose(lowerCAmelCase , (3, 2, 0, 1) )
elif flax_key_tuple[-1] == "kernel" and ".".join(lowerCAmelCase ) not in pt_model_dict:
# linear layer
__magic_name__ : List[str] = flax_key_tuple[:-1] + ('weight',)
__magic_name__ : Dict = flax_tensor.T
elif flax_key_tuple[-1] in ["scale", "embedding"]:
__magic_name__ : Optional[Any] = flax_key_tuple[:-1] + ('weight',)
# adding batch stats from flax batch norm to pt
elif "mean" in flax_key_tuple[-1]:
__magic_name__ : List[str] = flax_key_tuple[:-1] + ('running_mean',)
elif "var" in flax_key_tuple[-1]:
__magic_name__ : Union[str, Any] = flax_key_tuple[:-1] + ('running_var',)
if "batch_stats" in flax_state:
__magic_name__ : Optional[int] = '.'.join(flax_key_tuple[1:] ) # Remove the params/batch_stats header
else:
__magic_name__ : Optional[int] = '.'.join(lowerCAmelCase )
# We also need to look at `pt_model_dict` and see if there are keys requiring further transformation.
__magic_name__ : Dict = {}
# New `weight_norm` from https://github.com/huggingface/transformers/pull/24030
for key in pt_model_dict:
__magic_name__ : Tuple = key.split('.' )
__magic_name__ : List[Any] = None
if key_components[-3::2] == ["parametrizations", "original0"]:
__magic_name__ : Dict = key_components[-2] + '_g'
elif key_components[-3::2] == ["parametrizations", "original1"]:
__magic_name__ : List[Any] = key_components[-2] + '_v'
if name is not None:
__magic_name__ : Dict = key_components[:-3] + [name]
__magic_name__ : Optional[Any] = '.'.join(lowerCAmelCase )
__magic_name__ : Dict = key
if flax_key in special_pt_names:
__magic_name__ : Tuple = special_pt_names[flax_key]
if flax_key in pt_model_dict:
if flax_tensor.shape != pt_model_dict[flax_key].shape:
raise ValueError(
f'Flax checkpoint seems to be incorrect. Weight {flax_key_tuple} was expected '
f'to be of shape {pt_model_dict[flax_key].shape}, but is {flax_tensor.shape}.' )
else:
# add weight to pytorch dict
__magic_name__ : int = np.asarray(lowerCAmelCase ) if not isinstance(lowerCAmelCase , np.ndarray ) else flax_tensor
__magic_name__ : str = torch.from_numpy(lowerCAmelCase )
# remove from missing keys
missing_keys.remove(lowerCAmelCase )
else:
# weight is not expected by PyTorch model
unexpected_keys.append(lowerCAmelCase )
pt_model.load_state_dict(lowerCAmelCase )
# re-transform missing_keys to list
__magic_name__ : Optional[Any] = list(lowerCAmelCase )
if len(lowerCAmelCase ) > 0:
logger.warning(
'Some weights of the Flax model were not used when initializing the PyTorch model'
f' {pt_model.__class__.__name__}: {unexpected_keys}\n- This IS expected if you are initializing'
f' {pt_model.__class__.__name__} from a Flax model trained on another task or with another architecture'
' (e.g. initializing a BertForSequenceClassification model from a FlaxBertForPreTraining model).\n- This'
f' IS NOT expected if you are initializing {pt_model.__class__.__name__} from a Flax model that you expect'
' to be exactly identical (e.g. initializing a BertForSequenceClassification model from a'
' FlaxBertForSequenceClassification model).' )
else:
logger.warning(f'All Flax model weights were used when initializing {pt_model.__class__.__name__}.\n' )
if len(lowerCAmelCase ) > 0:
logger.warning(
f'Some weights of {pt_model.__class__.__name__} were not initialized from the Flax model and are newly'
f' initialized: {missing_keys}\nYou should probably TRAIN this model on a down-stream task to be able to'
' use it for predictions and inference.' )
else:
logger.warning(
f'All the weights of {pt_model.__class__.__name__} were initialized from the Flax model.\n'
'If your task is similar to the task the model of the checkpoint was trained on, '
f'you can already use {pt_model.__class__.__name__} for predictions without further training.' )
    return pt_model
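# Hedged usage sketch: upstream transformers exposes the loader above as
# `load_flax_weights_in_pytorch_model(pt_model, flax_state)`; the checkpoint name is a
# placeholder and the call needs network access to download the weights.
def _demo_flax_to_pt():
    from transformers import BertModel, FlaxBertModel
    from transformers.modeling_flax_pytorch_utils import load_flax_weights_in_pytorch_model
    flax_model = FlaxBertModel.from_pretrained('bert-base-cased')
    pt_model = BertModel.from_pretrained('bert-base-cased')
    return load_flax_weights_in_pytorch_model(pt_model, flax_model.params)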
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase :int = '''pt'''
elif is_tf_available():
lowerCAmelCase :Optional[Any] = '''tf'''
else:
lowerCAmelCase :Optional[Any] = '''jax'''
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = ByTaTokenizer
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
super().setUp()
__magic_name__ : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__magic_name__ : Optional[Any] = []
for i in range(len(_A ) ):
try:
__magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) )
__magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
__magic_name__ : Optional[int] = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
__magic_name__ : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__magic_name__ : List[str] = [t[0] for t in toks]
# Ensure consistency
__magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
__magic_name__ : int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
__magic_name__ : Union[str, Any] = ' ' + output_txt
__magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def __lowerCAmelCase ( self : int ) -> str:
__magic_name__ : Any = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
__magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __lowerCAmelCase ( self : int ) -> Tuple:
__magic_name__ : Optional[int] = self.ta_base_tokenizer
__magic_name__ : Optional[int] = 'Unicode €.'
__magic_name__ : Optional[Any] = tokenizer(_A )
__magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : Any = tokenizer.decode(_A )
self.assertEqual(_A , 'Unicode €.</s>' )
__magic_name__ : Any = tokenizer('e è é ê ë' )
__magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : List[str] = tokenizer.decode(_A )
self.assertEqual(_A , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
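    # ByT5 is byte-level: each UTF-8 byte b maps to id b + 3 because ids 0..2 are reserved
    # (pad/eos/unk), and 1 is appended for </s>; e.g. 'U' is byte 85, hence the id 88 above.
    # A minimal sketch of that mapping (illustration only, not the tokenizer's real code):
    def _byta_byte_ids(self, text: str) -> list:
        return [b + 3 for b in text.encode('utf-8' )] + [1]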
def __lowerCAmelCase ( self : Any ) -> int:
__magic_name__ : List[Any] = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
__magic_name__ : str = list(batch.input_ids.numpy()[0] )
else:
__magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _A )
self.assertIn('attention_mask' , _A )
self.assertNotIn('decoder_input_ids' , _A )
self.assertNotIn('decoder_attention_mask' , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Union[str, Any] = self.ta_base_tokenizer
__magic_name__ : Tuple = [
'Summary of the text.',
'Another summary.',
]
__magic_name__ : Dict = tokenizer(
text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : Any = ['A long paragraph for summarization. </s>']
__magic_name__ : List[str] = ['Summary of the text. </s>']
# fmt: off
__magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__magic_name__ : str = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['input_ids'][0] )
self.assertEqual(_A , batch['labels'][0] )
def __lowerCAmelCase ( self : Any ) -> str:
# safety check on max_len default value so we are sure the test works
__magic_name__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str = tempfile.mkdtemp()
__magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running'
__magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
__magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Optional[Any] = tempfile.mkdtemp()
__magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : Any = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
__magic_name__ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__magic_name__ : Union[str, Any] = json.load(_A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__magic_name__ : Optional[Any] = json.load(_A )
__magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )]
__magic_name__ : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
__magic_name__ : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : str = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )]
__magic_name__ : Optional[Any] = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
__magic_name__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
__magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : List[str] ) -> int:
pass
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> int:
pass
def __lowerCAmelCase ( self : str ) -> Tuple:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
__magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
__magic_name__ : int = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : List[str] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__magic_name__ : List[str] = 0
__magic_name__ : str = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] )
setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] )
                self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] )
'''simple docstring'''
def lowerCamelCase ( ):
"""simple docstring"""
for n in range(1 , 100_0000 ):
yield n * (n + 1) // 2
def lowerCamelCase ( lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : Tuple = 1
__magic_name__ : Any = 2
while i * i <= n:
__magic_name__ : str = 0
while n % i == 0:
n //= i
multiplicity += 1
divisors_count *= multiplicity + 1
i += 1
if n > 1:
divisors_count *= 2
return divisors_count
def lowerCamelCase ( ):
"""simple docstring"""
return next(i for i in triangle_number_generator() if count_divisors(lowerCAmelCase ) > 500 )
if __name__ == "__main__":
    print(solution())
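# Sanity sketch for the divisor count above (the defs are name-obfuscated here, but the
# driver refers to them as `triangle_number_generator` and `count_divisors`):
# 28 = 2**2 * 7, so count_divisors(28) should be (2 + 1) * (1 + 1) = 6 (1, 2, 4, 7, 14, 28).
def _check_count_divisors():
    assert count_divisors(28 ) == 6
    assert count_divisors(76_576_500 ) > 500 # first triangle number with more than 500 divisors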
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ : Dict = DisjunctiveConstraint(_A )
self.assertTrue(isinstance(dc.token_ids , _A ) )
with self.assertRaises(_A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_A ):
DisjunctiveConstraint(_A ) # fails here
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
__magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]]
__magic_name__ : List[Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 )
__magic_name__ : Optional[int] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 )
__magic_name__ : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 )
__magic_name__ : Any = stepped is True and completed is True and reset is False
self.assertTrue(_A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
        self.assertTrue(dc.current_seq == [1, 2, 5] )
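# Hedged usage sketch of the API exercised above: a DisjunctiveConstraint completes as
# soon as any one of its candidate token sequences has been generated in full.
def _demo_disjunctive_constraint():
    dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]] )
    for token in (1, 2, 4):
        stepped, completed, reset = dc.update(token )
    assert dc.completed and dc.current_seq == [1, 2, 4]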
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Union[str, Any] , lowerCAmelCase : int , lowerCAmelCase : Any , lowerCAmelCase : Dict , lowerCAmelCase : List[str] ):
"""simple docstring"""
if index == r:
for j in range(lowerCAmelCase ):
print(data[j] , end=' ' )
print(' ' )
return
    # When no more elements are left to put in data[]
if i >= n:
return
# current is included, put next at next location
__magic_name__ : int = arr[i]
combination_util(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , index + 1 , lowerCAmelCase , i + 1 )
# current is excluded, replace it with
# next (Note that i+1 is passed, but
# index is not changed)
combination_util(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , i + 1 )
# The main function that prints all combinations
# of size r in arr[] of size n. This function
# mainly uses combinationUtil()
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int , lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : Optional[Any] = [0] * r
# Print all combination using temporary array 'data[]'
combination_util(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , 0 , lowerCAmelCase , 0 )
if __name__ == "__main__":
# Driver code to check the function above
lowerCAmelCase :List[Any] = [1_0, 2_0, 3_0, 4_0, 5_0]
print_combination(arr, len(arr), 3)
# This code is contributed by Ambuj sahu
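# Cross-check sketch: the recursive routine above enumerates the same C(5, 3) = 10
# subsets that itertools.combinations yields (the print order may differ).
from itertools import combinations
def _reference_combinations(arr: list, r: int) -> list:
    return list(combinations(arr , r ) )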
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCAmelCase :List[str] = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
lowerCAmelCase :List[Any] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Union[str, Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Tuple = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Optional[Any] = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[int] = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
lowerCAmelCase :Tuple = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Union[str, Any] = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
lowerCAmelCase :Dict = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
lowerCAmelCase :Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Any = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
lowerCAmelCase :Any = ''''''
lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
__magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : str = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
__magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Any = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
        ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
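# Minimal usage sketch mirroring the fixtures above: validate a README string in one call
# against the schema loaded at the top of this file (names here are illustrative).
def _validate_readme_string(readme_md: str, structure: dict) -> dict:
    readme = ReadMe.from_string(readme_md , structure )
    readme.validate()
    return readme.to_dict()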
'''simple docstring'''
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer
from ...configuration_utils import PretrainedConfig
from ...file_utils import TensorType, is_torch_available
from ...onnx import OnnxConfig, OnnxConfigWithPast, OnnxSeqaSeqConfigWithPast
from ...onnx.utils import compute_effective_axis_dimension
from ...utils import logging
lowerCAmelCase :Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase :Dict = {
'''facebook/blenderbot_small-90M''': '''https://huggingface.co/facebook/blenderbot_small-90M/resolve/main/config.json''',
# See all BlenderbotSmall models at https://huggingface.co/models?filter=blenderbot_small
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = """blenderbot-small"""
A_ : int = ["""past_key_values"""]
A_ : int = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Optional[int] , _A : Optional[Any]=50265 , _A : str=512 , _A : Optional[Any]=8 , _A : List[str]=2048 , _A : str=16 , _A : Tuple=8 , _A : str=2048 , _A : List[Any]=16 , _A : List[str]=0.0 , _A : Union[str, Any]=0.0 , _A : Union[str, Any]=True , _A : Tuple=True , _A : int="gelu" , _A : str=512 , _A : str=0.1 , _A : int=0.0 , _A : Dict=0.0 , _A : Union[str, Any]=0.02 , _A : Any=1 , _A : Dict=False , _A : List[Any]=0 , _A : Optional[int]=1 , _A : Tuple=2 , _A : int=2 , **_A : Dict , ) -> Dict:
__magic_name__ : List[Any] = vocab_size
__magic_name__ : Union[str, Any] = max_position_embeddings
__magic_name__ : Any = d_model
__magic_name__ : int = encoder_ffn_dim
__magic_name__ : str = encoder_layers
__magic_name__ : str = encoder_attention_heads
__magic_name__ : Any = decoder_ffn_dim
__magic_name__ : Optional[int] = decoder_layers
__magic_name__ : Dict = decoder_attention_heads
__magic_name__ : List[str] = dropout
__magic_name__ : List[Any] = attention_dropout
__magic_name__ : Any = activation_dropout
__magic_name__ : Union[str, Any] = activation_function
__magic_name__ : int = init_std
__magic_name__ : Any = encoder_layerdrop
__magic_name__ : Tuple = decoder_layerdrop
__magic_name__ : Dict = use_cache
__magic_name__ : Any = encoder_layers
__magic_name__ : str = scale_embedding # scale factor will be sqrt(d_model) if True
super().__init__(
pad_token_id=_A , bos_token_id=_A , eos_token_id=_A , is_encoder_decoder=_A , decoder_start_token_id=_A , forced_eos_token_id=_A , **_A , )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Optional[Any] = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__magic_name__ : int = {0: 'batch'}
__magic_name__ : Union[str, Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
else:
__magic_name__ : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
__magic_name__ : Optional[int] = {0: 'batch', 1: 'decoder_sequence'}
if self.use_past:
self.fill_with_past_key_values_(_A , direction='inputs' )
elif self.task == "causal-lm":
# TODO: figure this case out.
__magic_name__ : int = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
] )
if self.use_past:
__magic_name__ , __magic_name__ : Any = self.num_layers
for i in range(_A ):
__magic_name__ : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
__magic_name__ : Dict = {0: 'batch', 2: 'past_sequence + sequence'}
else:
__magic_name__ : Tuple = OrderedDict(
[
('input_ids', {0: 'batch', 1: 'encoder_sequence'}),
('attention_mask', {0: 'batch', 1: 'encoder_sequence'}),
('decoder_input_ids', {0: 'batch', 1: 'decoder_sequence'}),
('decoder_attention_mask', {0: 'batch', 1: 'decoder_sequence'}),
] )
return common_inputs
@property
def __lowerCAmelCase ( self : List[Any] ) -> Mapping[str, Mapping[int, str]]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Dict = super().outputs
else:
__magic_name__ : int = super(_A , self ).outputs
if self.use_past:
__magic_name__ , __magic_name__ : str = self.num_layers
for i in range(_A ):
__magic_name__ : Union[str, Any] = {0: 'batch', 2: 'past_sequence + sequence'}
__magic_name__ : Any = {0: 'batch', 2: 'past_sequence + sequence'}
return common_outputs
def __lowerCAmelCase ( self : str , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]:
__magic_name__ : Dict = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
# Generate decoder inputs
__magic_name__ : Any = seq_length if not self.use_past else 1
__magic_name__ : List[Any] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
__magic_name__ : List[str] = {F'decoder_{name}': tensor for name, tensor in decoder_inputs.items()}
__magic_name__ : Optional[Any] = dict(**_A , **_A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__magic_name__ , __magic_name__ : Tuple = common_inputs['input_ids'].shape
__magic_name__ : List[Any] = common_inputs['decoder_input_ids'].shape[1]
__magic_name__ , __magic_name__ : Any = self.num_attention_heads
__magic_name__ : str = (
batch,
num_encoder_attention_heads,
encoder_seq_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : str = decoder_seq_length + 3
__magic_name__ : Optional[Any] = (
batch,
num_decoder_attention_heads,
decoder_past_length,
self._config.hidden_size // num_decoder_attention_heads,
)
__magic_name__ : Any = torch.cat(
[common_inputs['decoder_attention_mask'], torch.ones(_A , _A )] , dim=1 )
__magic_name__ : Dict = []
# If the number of encoder and decoder layers are present in the model configuration, both are considered
__magic_name__ , __magic_name__ : int = self.num_layers
__magic_name__ : Any = min(_A , _A )
__magic_name__ : List[str] = max(_A , _A ) - min_num_layers
__magic_name__ : Union[str, Any] = 'encoder' if num_encoder_layers > num_decoder_layers else 'decoder'
for _ in range(_A ):
common_inputs["past_key_values"].append(
(
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
torch.zeros(_A ),
) )
# TODO: test this.
__magic_name__ : Dict = encoder_shape if remaining_side_name == 'encoder' else decoder_shape
for _ in range(_A , _A ):
common_inputs["past_key_values"].append((torch.zeros(_A ), torch.zeros(_A )) )
return common_inputs
def __lowerCAmelCase ( self : Optional[Any] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]:
__magic_name__ : Optional[int] = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , _A , _A , _A , _A )
if self.use_past:
if not is_torch_available():
raise ValueError('Cannot generate dummy past_keys inputs without PyTorch installed.' )
else:
import torch
__magic_name__ , __magic_name__ : Tuple = common_inputs['input_ids'].shape
# Not using the same length for past_key_values
__magic_name__ : Tuple = seqlen + 2
__magic_name__ , __magic_name__ : Optional[int] = self.num_layers
__magic_name__ , __magic_name__ : Optional[int] = self.num_attention_heads
__magic_name__ : Any = (
batch,
num_encoder_attention_heads,
past_key_values_length,
self._config.hidden_size // num_encoder_attention_heads,
)
__magic_name__ : str = common_inputs['attention_mask'].dtype
__magic_name__ : Union[str, Any] = torch.cat(
[common_inputs['attention_mask'], torch.ones(_A , _A , dtype=_A )] , dim=1 )
__magic_name__ : Optional[int] = [
(torch.zeros(_A ), torch.zeros(_A )) for _ in range(_A )
]
return common_inputs
def __lowerCAmelCase ( self : Optional[int] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]:
# Copied from OnnxConfig.generate_dummy_inputs
# Did not use super(OnnxConfigWithPast, self).generate_dummy_inputs for code clarity.
# If dynamic axis (-1) we forward with a fixed dimension of 2 samples to avoid optimizations made by ONNX
__magic_name__ : str = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_batch , num_token_to_add=0 )
# If dynamic axis (-1) we forward with a fixed dimension of 8 tokens to avoid optimizations made by ONNX
__magic_name__ : str = tokenizer.num_special_tokens_to_add(_A )
__magic_name__ : str = compute_effective_axis_dimension(
_A , fixed_dimension=OnnxConfig.default_fixed_sequence , num_token_to_add=_A )
# Generate dummy inputs according to compute batch and sequence
__magic_name__ : int = [' '.join([tokenizer.unk_token] ) * seq_length] * batch_size
__magic_name__ : List[str] = dict(tokenizer(_A , return_tensors=_A ) )
return common_inputs
def __lowerCAmelCase ( self : List[Any] , _A : PreTrainedTokenizer , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional[TensorType] = None , ) -> Mapping[str, Any]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : Union[str, Any] = self._generate_dummy_inputs_for_default_and_seqaseq_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
elif self.task == "causal-lm":
__magic_name__ : List[str] = self._generate_dummy_inputs_for_causal_lm(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
else:
__magic_name__ : Tuple = self._generate_dummy_inputs_for_sequence_classification_and_question_answering(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
return common_inputs
def __lowerCAmelCase ( self : Optional[int] , _A : List[str] , _A : List[str] , _A : Dict , _A : Tuple ) -> Optional[int]:
if self.task in ["default", "seq2seq-lm"]:
__magic_name__ : List[str] = super()._flatten_past_key_values_(_A , _A , _A , _A )
else:
__magic_name__ : Dict = super(_A , self )._flatten_past_key_values_(
                _A , _A , _A , _A )
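# Shape sketch for the dummy past_key_values built above: each cached key/value tensor
# has shape (batch, num_heads, past_sequence_length, d_model // num_heads).
def _past_key_value_shape(batch: int, num_heads: int, past_len: int, d_model: int) -> tuple:
    return (batch, num_heads, past_len, d_model // num_heads)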
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int:
__magic_name__ : str = parent
__magic_name__ : List[Any] = 13
__magic_name__ : Union[str, Any] = 7
__magic_name__ : Tuple = True
__magic_name__ : Dict = True
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = True
__magic_name__ : int = 99
__magic_name__ : List[str] = 384
__magic_name__ : Optional[int] = 2
__magic_name__ : List[Any] = 4
__magic_name__ : int = 37
__magic_name__ : Union[str, Any] = 'gelu'
__magic_name__ : Optional[int] = 0.1
__magic_name__ : str = 0.1
__magic_name__ : Optional[Any] = 512
__magic_name__ : Any = 16
__magic_name__ : Union[str, Any] = 2
__magic_name__ : Any = 0.02
__magic_name__ : List[str] = 3
__magic_name__ : Tuple = 4
__magic_name__ : List[Any] = 128
__magic_name__ : Optional[Any] = 2
__magic_name__ : List[str] = 9
__magic_name__ : str = 1
__magic_name__ : List[str] = None
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Optional[Any] = None
if self.use_input_mask:
__magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
if self.use_token_type_ids:
__magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
__magic_name__ : int = None
if self.use_labels:
__magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Optional[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A : Tuple , _A : int , _A : Union[str, Any] ) -> Any:
__magic_name__ : Dict = TFConvBertModel(config=_A )
__magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__magic_name__ : Any = [input_ids, input_mask]
__magic_name__ : Tuple = model(_A )
__magic_name__ : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]:
__magic_name__ : Dict = TFConvBertForMaskedLM(config=_A )
__magic_name__ : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple:
__magic_name__ : Any = self.num_labels
__magic_name__ : str = TFConvBertForSequenceClassification(config=_A )
__magic_name__ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = self.num_choices
__magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
__magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
__magic_name__ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int:
__magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A )
__magic_name__ : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[str] = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : str = config_and_inputs
__magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[str] = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Tuple = False
A_ : Any = False
A_ : List[Any] = False
def __lowerCAmelCase ( self : List[Any] ) -> int:
__magic_name__ : Optional[Any] = TFConvBertModelTester(self )
__magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : int ) -> Any:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
__magic_name__ : Any = True
if hasattr(_A , 'use_cache' ):
__magic_name__ : List[Any] = True
__magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A )
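# Round-trip every model class through a TF SavedModel export and verify the
# reloaded graph still exposes hidden_states and attentions with the expected shapes.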
for model_class in self.all_model_classes:
__magic_name__ : List[str] = self._prepare_for_class(_A , _A )
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Tuple = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
__magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' )
__magic_name__ : Optional[int] = tf.keras.models.load_model(_A )
__magic_name__ : Optional[Any] = model(_A )
if self.is_encoder_decoder:
__magic_name__ : Optional[int] = outputs['encoder_hidden_states']
__magic_name__ : Tuple = outputs['encoder_attentions']
else:
__magic_name__ : Union[str, Any] = outputs['hidden_states']
__magic_name__ : Optional[Any] = outputs['attentions']
self.assertEqual(len(_A ) , _A )
__magic_name__ : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
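# ConvBERT's head_ratio (2 in this tester) halves the effective number of attention
# heads, hence the num_attention_heads / 2 in the expected attention shape below.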
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_A )
def __lowerCAmelCase ( self : List[str] ) -> Any:
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : str = True
__magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A )
__magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A )
def check_decoder_attentions_output(_A : List[Any] ):
__magic_name__ : Tuple = len(_A )
self.assertEqual(out_len % 2 , 0 )
__magic_name__ : Any = outputs.decoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_A : int ):
__magic_name__ : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = False
__magic_name__ : List[str] = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
__magic_name__ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
__magic_name__ : Any = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[int] = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : str = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : str = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) )
self.assertEqual(model.config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : int ) -> int:
__magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : Tuple = model(_A )[0]
__magic_name__ : str = [1, 6, 768]
self.assertEqual(output.shape , _A )
__magic_name__ : Tuple = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 ) | 331 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import PaddingStrategy, TensorType, logging
lowerCAmelCase :Any = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ["""input_values""", """padding_mask"""]
def __init__( self : int , _A : int = 1 , _A : int = 24000 , _A : float = 0.0 , _A : float = None , _A : float = None , **_A : Dict , ) -> Optional[int]:
super().__init__(feature_size=_A , sampling_rate=_A , padding_value=_A , **_A )
__magic_name__ : Tuple = chunk_length_s
__magic_name__ : List[Any] = overlap
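# The two properties below derive the framing geometry from the constructor args:
# chunk_length is chunk_length_s * sampling_rate samples, and chunk_stride is
# (1 - overlap) * chunk_length, clamped to at least one sample.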
@property
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
if self.chunk_length_s is None:
return None
else:
return int(self.chunk_length_s * self.sampling_rate )
@property
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
if self.chunk_length_s is None or self.overlap is None:
return None
else:
return max(1 , int((1.0 - self.overlap) * self.chunk_length ) )
def __call__( self : str , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : Optional[Union[bool, str, PaddingStrategy]] = None , _A : Optional[bool] = False , _A : Optional[int] = None , _A : Optional[Union[str, TensorType]] = None , _A : Optional[int] = None , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
F'The model corresponding to this feature extractor: {self} was trained using a sampling rate of'
F' {self.sampling_rate}. Please make sure that the provided audio input was sampled with'
F' {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
if padding and truncation:
raise ValueError('Both padding and truncation were set. Make sure you only set one.' )
elif padding is None:
# by default let's pad the inputs
__magic_name__ : Union[str, Any] = True
__magic_name__ : Union[str, Any] = bool(
isinstance(_A , (list, tuple) ) and (isinstance(raw_audio[0] , (np.ndarray, tuple, list) )) )
if is_batched:
__magic_name__ : Optional[int] = [np.asarray(audio , dtype=np.floataa ).T for audio in raw_audio]
elif not is_batched and not isinstance(_A , np.ndarray ):
__magic_name__ : Optional[Any] = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_audio.dtype is np.dtype(np.floataa ):
__magic_name__ : Optional[Any] = raw_audio.astype(np.floataa )
# always return batch
if not is_batched:
__magic_name__ : Union[str, Any] = [np.asarray(_A ).T]
# verify inputs are valid
for idx, example in enumerate(_A ):
if example.ndim > 2:
raise ValueError(F'Expected input shape (channels, length) but got shape {example.shape}' )
if self.feature_size == 1 and example.ndim != 1:
raise ValueError(F'Expected mono audio but example has {example.shape[-1]} channels' )
if self.feature_size == 2 and example.shape[-1] != 2:
raise ValueError(F'Expected stereo audio but example has {example.shape[-1]} channels' )
__magic_name__ : int = None
__magic_name__ : int = BatchFeature({'input_values': raw_audio} )
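# With chunking enabled, snap max_length to a whole number of hops:
# nb_step = floor(max_length / chunk_stride) when truncating to the shortest example,
# ceil(...) when padding to the longest, then
# max_length = (nb_step - 1) * chunk_stride + chunk_length.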
if self.chunk_stride is not None and self.chunk_length is not None and max_length is None:
if truncation:
__magic_name__ : Dict = min(array.shape[0] for array in raw_audio )
__magic_name__ : List[str] = int(np.floor(max_length / self.chunk_stride ) )
__magic_name__ : Dict = (nb_step - 1) * self.chunk_stride + self.chunk_length
elif padding:
__magic_name__ : Tuple = max(array.shape[0] for array in raw_audio )
__magic_name__ : Optional[Any] = int(np.ceil(max_length / self.chunk_stride ) )
__magic_name__ : Tuple = (nb_step - 1) * self.chunk_stride + self.chunk_length
__magic_name__ : Dict = 'max_length'
else:
__magic_name__ : Union[str, Any] = input_values
# normal padding on batch
if padded_inputs is None:
__magic_name__ : Optional[int] = self.pad(
_A , max_length=_A , truncation=_A , padding=_A , return_attention_mask=_A , )
if padding:
__magic_name__ : str = padded_inputs.pop('attention_mask' )
__magic_name__ : List[Any] = []
for example in padded_inputs.pop('input_values' ):
if self.feature_size == 1:
__magic_name__ : int = example[..., None]
input_values.append(example.T )
__magic_name__ : Dict = input_values
if return_tensors is not None:
__magic_name__ : Any = padded_inputs.convert_to_tensors(_A )
return padded_inputs | 331 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase :Dict = pytest.mark.integration
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
# 30 rows named my_name-train_0 ... my_name-train_29 (str(x), not the undefined str(_A))
__magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
__magic_name__ : Union[str, Any] = dset.map(
lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A )
__magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
__magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def __lowerCAmelCase ( self : Any ) -> str:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Tuple ) -> int:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__magic_name__ : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : int = {'acknowledged': True}
mocked_bulk.return_value([(True, None)] * 30 )
__magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
__magic_name__ : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_A )
__magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
import faiss
__magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__magic_name__ : str = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Optional[int] = 1
__magic_name__ , __magic_name__ : str = index.search(_A )
self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1]
__magic_name__ , __magic_name__ : str = index.search_batch(_A )
self.assertRaises(_A , index.search_batch , queries[0] )
__magic_name__ : List[Any] = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _A )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
import faiss
__magic_name__ : str = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__magic_name__ : str = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_A ):
__magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
import faiss
__magic_name__ : Any = faiss.IndexFlat(5 )
__magic_name__ : Optional[Any] = FaissIndex(custom_index=_A )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : Dict ) -> Tuple:
import faiss
__magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
index.save(tmp_file.name )
__magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ : Dict = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Tuple = 1
__magic_name__ , __magic_name__ : Optional[Any] = index.search(_A )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
import faiss
__magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__magic_name__ : Dict = 'index.faiss'
__magic_name__ : Optional[Any] = f'mock://{index_name}'
index.save(lowerCAmelCase , storage_options=mockfs.storage_options )
__magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options )
__magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa )
__magic_name__ : List[str] = 1
__magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> Dict:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : Any = Elasticsearch()
__magic_name__ : Union[str, Any] = {'acknowledged': True}
__magic_name__ : Tuple = ElasticSearchIndex(es_client=_A )
mocked_bulk.return_value([(True, None)] * 3 )
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__magic_name__ : str = 'foo'
__magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__magic_name__ : str = 'foo'
__magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A )
__magic_name__ : Tuple = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A )
# batched queries with timeout
__magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Dict = index.search_batch(_A , request_timeout=30 )
__magic_name__ : Optional[int] = [scores[0] for scores in total_scores]
__magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A ) | 331 | 1 |
'''simple docstring'''
from ....configuration_utils import PretrainedConfig
from ....utils import logging
lowerCAmelCase :str = logging.get_logger(__name__)
# TODO: upload to AWS
lowerCAmelCase :str = {
'''yjernite/retribert-base-uncased''': (
'''https://huggingface.co/yjernite/retribert-base-uncased/resolve/main/config.json'''
),
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = """retribert"""
def __init__( self : str , _A : List[str]=30522 , _A : Optional[Any]=768 , _A : List[Any]=8 , _A : Dict=12 , _A : Any=3072 , _A : Tuple="gelu" , _A : Optional[int]=0.1 , _A : List[str]=0.1 , _A : Union[str, Any]=512 , _A : int=2 , _A : Tuple=0.02 , _A : Optional[Any]=1E-12 , _A : Optional[Any]=True , _A : str=128 , _A : Union[str, Any]=0 , **_A : Tuple , ) -> List[str]:
super().__init__(pad_token_id=_A , **_A )
__magic_name__ : str = vocab_size
__magic_name__ : int = hidden_size
__magic_name__ : Optional[int] = num_hidden_layers
__magic_name__ : List[str] = num_attention_heads
__magic_name__ : str = hidden_act
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Optional[Any] = hidden_dropout_prob
__magic_name__ : Optional[Any] = attention_probs_dropout_prob
__magic_name__ : Optional[int] = max_position_embeddings
__magic_name__ : List[str] = type_vocab_size
__magic_name__ : int = initializer_range
__magic_name__ : str = layer_norm_eps
__magic_name__ : Dict = share_encoders
__magic_name__ : int = projection_dim | 331 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : List[Any] = filter(lambda p : p.requires_grad , model.parameters() )
__magic_name__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__)
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ):
"""simple docstring"""
if metric == "rouge2":
__magic_name__ : Any = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
__magic_name__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
__magic_name__ : Dict = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
__magic_name__ : int = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
' function.' )
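# Keep a single best checkpoint, ranked on the monitored validation metric.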
__magic_name__ : List[Any] = ModelCheckpoint(
dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return EarlyStopping(
monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int:
__magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None:
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
__magic_name__ : List[str] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
__magic_name__ : List[Any] = od / 'test_results.txt'
__magic_name__ : Dict = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
__magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A , 'a+' ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
__magic_name__ : Optional[Any] = metrics[key]
if isinstance(_A , torch.Tensor ):
__magic_name__ : Tuple = val.item()
__magic_name__ : int = F'{key}: {val:.6f}\n'
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
__magic_name__ : str = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_A )
@rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple:
try:
__magic_name__ : str = pl_module.model.model.num_parameters()
except AttributeError:
__magic_name__ : List[str] = pl_module.model.num_parameters()
__magic_name__ : List[Any] = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_A , _A , 'test' )
@rank_zero_only
def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 331 | 1 |
'''simple docstring'''
from manim import *
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) -> Optional[int]:
__magic_name__ : Union[str, Any] = Rectangle(height=0.5 , width=0.5 )
__magic_name__ : int = Rectangle(height=0.46 , width=0.46 ).set_stroke(width=0 )
__magic_name__ : Tuple = [mem.copy() for i in range(6 )]
__magic_name__ : List[Any] = [mem.copy() for i in range(6 )]
__magic_name__ : Optional[Any] = VGroup(*_A ).arrange(_A , buff=0 )
__magic_name__ : Union[str, Any] = VGroup(*_A ).arrange(_A , buff=0 )
__magic_name__ : List[str] = VGroup(_A , _A ).arrange(_A , buff=0 )
__magic_name__ : str = Text('CPU' , font_size=24 )
__magic_name__ : Dict = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
cpu.move_to([-2.5, -0.5, 0] )
self.add(_A )
__magic_name__ : List[Any] = [mem.copy() for i in range(4 )]
__magic_name__ : Any = VGroup(*_A ).arrange(_A , buff=0 )
__magic_name__ : int = Text('GPU' , font_size=24 )
__magic_name__ : Union[str, Any] = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
gpu.move_to([-1, -1, 0] )
self.add(_A )
__magic_name__ : int = [mem.copy() for i in range(6 )]
__magic_name__ : Optional[int] = VGroup(*_A ).arrange(_A , buff=0 )
__magic_name__ : Optional[int] = Text('Model' , font_size=24 )
__magic_name__ : str = Group(_A , _A ).arrange(_A , buff=0.5 , aligned_edge=_A )
model.move_to([3, -1.0, 0] )
self.add(_A )
__magic_name__ : List[str] = []
for i, rect in enumerate(_A ):
rect.set_stroke(_A )
# target = fill.copy().set_fill(YELLOW, opacity=0.7)
# target.move_to(rect)
# self.add(target)
__magic_name__ : List[Any] = Rectangle(height=0.46 / 4 , width=0.46 / 3 ).set_stroke(width=0.0 ).set_fill(_A , opacity=0.7 )
if i == 0:
cpu_target.next_to(cpu_left_col_base[0].get_corner(DOWN + LEFT ) , buff=0.02 , direction=_A )
cpu_target.set_x(cpu_target.get_x() + 0.1 )
elif i == 3:
cpu_target.next_to(cpu_targs[0] , direction=_A , buff=0.0 )
else:
cpu_target.next_to(cpu_targs[i - 1] , direction=_A , buff=0.0 )
self.add(_A )
cpu_targs.append(_A )
__magic_name__ : Tuple = [mem.copy() for i in range(6 )]
__magic_name__ : Union[str, Any] = VGroup(*_A ).arrange(_A , buff=0 )
__magic_name__ : List[Any] = Text('Loaded Checkpoint' , font_size=24 )
__magic_name__ : Tuple = Group(_A , _A ).arrange(_A , aligned_edge=_A , buff=0.4 )
checkpoint.move_to([3, 0.5, 0] )
__magic_name__ : str = Square(side_length=2.2 )
key.move_to([-5, 2, 0] )
__magic_name__ : Any = MarkupText(
F'<b>Key:</b>\n\n<span fgcolor=\'{YELLOW}\'>●</span> Empty Model' , font_size=18 , )
key_text.move_to([-5, 2.4, 0] )
self.add(_A , _A )
__magic_name__ : Optional[int] = MarkupText(
F'<span fgcolor=\'{BLUE}\'>●</span> Checkpoint' , font_size=18 , )
blue_text.next_to(_A , DOWN * 2.4 , aligned_edge=key_text.get_left() )
__magic_name__ : List[str] = MarkupText(
F'Next, a <i><span fgcolor="{BLUE}">second</span></i> model is loaded into memory,\nwith the weights of a <span fgcolor="{BLUE}">single shard</span>.' , font_size=24 , )
step_a.move_to([2, 2, 0] )
self.play(Write(_A ) , Write(_A ) )
self.play(Write(_A , run_time=1 ) , Create(_A , run_time=1 ) )
__magic_name__ : int = []
__magic_name__ : Any = []
for i, rect in enumerate(_A ):
__magic_name__ : List[Any] = fill.copy().set_fill(_A , opacity=0.7 )
target.move_to(_A )
first_animations.append(GrowFromCenter(_A , run_time=1 ) )
__magic_name__ : List[str] = target.copy()
cpu_target.generate_target()
if i < 5:
cpu_target.target.move_to(cpu_left_col_base[i + 1] )
else:
cpu_target.target.move_to(cpu_right_col_base[i - 5] )
second_animations.append(MoveToTarget(_A , run_time=1.5 ) )
self.play(*_A )
self.play(*_A )
self.wait() | 331 |
'''simple docstring'''
def one_pence ( ):
    """simple docstring"""
    return 1
def two_pence ( x : int ):
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence ( x : int ):
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence ( x : int ):
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence ( x : int ):
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence ( x : int ):
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound ( x : int ):
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound ( x : int ):
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution ( x : int = 200 ):
    """simple docstring"""
    return two_pound(x )
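# Sanity check (Project Euler 31): counting the ways to make 200 pence from the
# coins {1, 2, 5, 10, 20, 50, 100, 200} gives solution(200) == 73682.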
if __name__ == "__main__":
print(solution(int(input().strip()))) | 331 | 1 |
'''simple docstring'''
def lowerCamelCase ( a : int , b : int ):
    """simple docstring"""
    if a < 0 or b < 0:
        raise ValueError('the value of both inputs must be positive' )
    a_binary = str(bin(a ) )[2:]  # remove the leading "0b"
    b_binary = str(bin(b ) )[2:]  # remove the leading "0b"
    max_len = max(len(a_binary ) , len(b_binary ) )
    return "0b" + "".join(
        str(int(char_a != char_b ) )
        for char_a, char_b in zip(a_binary.zfill(max_len ) , b_binary.zfill(max_len ) ) )
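# Worked example: lowerCamelCase(25, 32) -> '0b111001', since the bitwise XOR of
# 011001 (25) and 100000 (32) is 111001.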
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Dict = ["""flax""", """transformers"""]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""flax""", """transformers"""]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] ) | 331 | 1 |
'''simple docstring'''
import os
def solution ( ):
    """simple docstring"""
    with open(os.path.dirname(__file__ ) + '/p022_names.txt' ) as file:
        names = str(file.readlines()[0] )
        names = names.replace('"' , '' ).split(',' )
    names.sort()
    name_score = 0
    total_score = 0
    for i, name in enumerate(names ):
        for letter in name:
            name_score += ord(letter ) - 64
        total_score += (i + 1) * name_score
        name_score = 0
    return total_score
if __name__ == "__main__":
print(solution()) | 331 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase :Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any:
super().__init__(*_A , **_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]:
__magic_name__ : Union[str, Any] = {}
__magic_name__ : Optional[Any] = {}
if prompt is not None:
__magic_name__ : Union[str, Any] = prompt
if generate_kwargs is not None:
__magic_name__ : str = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__magic_name__ : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
__magic_name__ : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
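# Typical usage (a sketch, not part of the original file; the checkpoint name is an
# assumed example):
#   captioner = pipeline('image-to-text', model='Salesforce/blip-image-captioning-base')
#   captioner('https://example.com/cat.png', max_new_tokens=20)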
def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict:
__magic_name__ : List[Any] = load_image(_A )
if prompt is not None:
if not isinstance(_A , _A ):
raise ValueError(
F'Received an invalid text input, got - {type(_A )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
__magic_name__ : Any = self.model.config.model_type
if model_type == "git":
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids
__magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids
__magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework )
model_inputs.update(_A )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
__magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__magic_name__ : int = None
return model_inputs
def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _A )
and all(x is None for x in model_inputs['input_ids'] )
):
__magic_name__ : str = None
if generate_kwargs is None:
__magic_name__ : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name )
__magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A )
return model_outputs
def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]:
__magic_name__ : Optional[Any] = []
for output_ids in model_outputs:
__magic_name__ : Union[str, Any] = {
'generated_text': self.tokenizer.decode(
_A , skip_special_tokens=_A , )
}
records.append(_A )
return records | 331 | 1 |
'''simple docstring'''
from abc import ABC, abstractmethod
from typing import List, Optional
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : str ) -> Dict:
# test for the above condition
self.test()
def __lowerCAmelCase ( self : List[Any] ) -> Any:
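# Self-check: feed the constraint its own advance() suggestion in a loop; every
# suggestion must make progress, and the constraint must complete within 10000 updates.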
__magic_name__ : Any = 0
__magic_name__ : List[Any] = False
while not completed:
if counter == 1:
self.reset()
__magic_name__ : int = self.advance()
if not self.does_advance(_A ):
raise Exception(
'Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.' )
__magic_name__ , __magic_name__ , __magic_name__ : Union[str, Any] = self.update(_A )
counter += 1
if counter > 10000:
raise Exception('update() does not fulfill the constraint.' )
if self.remaining() != 0:
raise Exception('Custom Constraint is not defined correctly.' )
@abstractmethod
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def __lowerCAmelCase ( self : Any , _A : int ) -> List[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def __lowerCAmelCase ( self : Dict , _A : int ) -> Optional[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def __lowerCAmelCase ( self : List[str] ) -> int:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def __lowerCAmelCase ( self : List[Any] ) -> Optional[Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
@abstractmethod
def __lowerCAmelCase ( self : Union[str, Any] , _A : str=False ) -> Union[str, Any]:
raise NotImplementedError(
F'{self.__class__} is an abstract class. Only classes inheriting this class can be called.' )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : str , _A : List[int] ) -> Optional[Any]:
super().__init__()
if not isinstance(_A , _A ) or len(_A ) == 0:
raise ValueError(F'`token_ids` has to be a non-empty list, but is {token_ids}.' )
if any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids ):
raise ValueError(F'Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.' )
__magic_name__ : Optional[int] = token_ids
__magic_name__ : Dict = len(self.token_ids )
__magic_name__ : Dict = -1 # the index of the currently fulfilled step
__magic_name__ : Optional[Any] = False
def __lowerCAmelCase ( self : str ) -> List[str]:
if self.completed:
return None
return self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self : Optional[int] , _A : int ) -> List[str]:
if not isinstance(_A , _A ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(_A )}' )
if self.completed:
return False
return token_id == self.token_ids[self.fulfilled_idx + 1]
def __lowerCAmelCase ( self : Union[str, Any] , _A : int ) -> Dict:
if not isinstance(_A , _A ):
raise ValueError(F'`token_id` has to be an `int`, but is {token_id} of type {type(_A )}' )
__magic_name__ : Tuple = False
__magic_name__ : Optional[Any] = False
__magic_name__ : str = False
if self.does_advance(_A ):
self.fulfilled_idx += 1
__magic_name__ : Union[str, Any] = True
if self.fulfilled_idx == (self.seqlen - 1):
__magic_name__ : List[Any] = True
__magic_name__ : Union[str, Any] = completed
else:
# failed to make progress.
__magic_name__ : Optional[int] = True
self.reset()
return stepped, completed, reset
def __lowerCAmelCase ( self : List[str] ) -> int:
__magic_name__ : Optional[int] = False
__magic_name__ : int = 0
def __lowerCAmelCase ( self : str ) -> str:
return self.seqlen - (self.fulfilled_idx + 1)
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any]=False ) -> Tuple:
__magic_name__ : Dict = PhrasalConstraint(self.token_ids )
if stateful:
__magic_name__ : Dict = self.seqlen
__magic_name__ : Union[str, Any] = self.fulfilled_idx
__magic_name__ : str = self.completed
return new_constraint
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Dict , _A : List[List[int]] , _A : Tuple=True ) -> List[str]:
__magic_name__ : Any = max([len(one ) for one in nested_token_ids] )
__magic_name__ : Tuple = {}
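# Build a prefix trie: each candidate token-id sequence traces one root-to-leaf
# path, so sequences sharing a prefix share trie nodes.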
for token_ids in nested_token_ids:
__magic_name__ : List[Any] = root
for tidx, token_id in enumerate(_A ):
if token_id not in level:
__magic_name__ : str = {}
__magic_name__ : Any = level[token_id]
if no_subsets and self.has_subsets(_A , _A ):
raise ValueError(
'Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'
F' {nested_token_ids}.' )
__magic_name__ : List[Any] = root
def __lowerCAmelCase ( self : Optional[int] , _A : int ) -> Union[str, Any]:
__magic_name__ : List[str] = self.trie
for current_token in current_seq:
__magic_name__ : Optional[int] = start[current_token]
__magic_name__ : Any = list(start.keys() )
return next_tokens
def __lowerCAmelCase ( self : str , _A : List[Any] ) -> Tuple:
__magic_name__ : List[Any] = self.next_tokens(_A )
return len(_A ) == 0
def __lowerCAmelCase ( self : List[Any] , _A : int ) -> List[Any]:
__magic_name__ : str = list(root.values() )
if len(_A ) == 0:
return 1
else:
return sum([self.count_leaves(_A ) for nn in next_nodes] )
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : List[Any] ) -> str:
__magic_name__ : int = self.count_leaves(_A )
return len(_A ) != leaf_count
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Tuple , _A : List[List[int]] ) -> int:
super().__init__()
if not isinstance(_A , _A ) or len(_A ) == 0:
raise ValueError(F'`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.' )
if any(not isinstance(_A , _A ) for token_ids in nested_token_ids ):
raise ValueError(F'`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.' )
if any(
any((not isinstance(_A , _A ) or token_id < 0) for token_id in token_ids )
for token_ids in nested_token_ids ):
raise ValueError(
F'Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.' )
__magic_name__ : Optional[int] = DisjunctiveTrie(_A )
__magic_name__ : Tuple = nested_token_ids
__magic_name__ : List[Any] = self.trie.max_height
__magic_name__ : List[Any] = []
__magic_name__ : str = False
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
__magic_name__ : Dict = self.trie.next_tokens(self.current_seq )
if len(_A ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self : Union[str, Any] , _A : int ) -> List[str]:
if not isinstance(_A , _A ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}' )
__magic_name__ : Union[str, Any] = self.trie.next_tokens(self.current_seq )
return token_id in next_tokens
def __lowerCAmelCase ( self : Optional[Any] , _A : int ) -> Dict:
if not isinstance(_A , _A ):
raise ValueError(F'`token_id` is supposed to be type `int`, but is {token_id} of type {type(_A )}' )
__magic_name__ : List[Any] = False
__magic_name__ : List[str] = False
__magic_name__ : Optional[Any] = False
if self.does_advance(_A ):
self.current_seq.append(_A )
__magic_name__ : Dict = True
else:
__magic_name__ : Dict = True
self.reset()
__magic_name__ : Optional[int] = self.trie.reached_leaf(self.current_seq )
__magic_name__ : int = completed
return stepped, completed, reset
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
__magic_name__ : List[Any] = False
__magic_name__ : Tuple = []
def __lowerCAmelCase ( self : Tuple ) -> Tuple:
if self.completed:
# since this can be completed without reaching max height
return 0
else:
return self.seqlen - len(self.current_seq )
def __lowerCAmelCase ( self : Tuple , _A : Any=False ) -> List[str]:
__magic_name__ : str = DisjunctiveConstraint(self.token_ids )
if stateful:
__magic_name__ : List[Any] = self.seqlen
__magic_name__ : Any = self.current_seq
__magic_name__ : List[Any] = self.completed
return new_constraint
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Any , _A : List[Constraint] ) -> int:
__magic_name__ : Optional[Any] = constraints
# max # of steps required to fulfill a given constraint
__magic_name__ : int = max([c.seqlen for c in constraints] )
__magic_name__ : int = len(_A )
__magic_name__ : Optional[int] = False
self.init_state()
def __lowerCAmelCase ( self : Optional[int] ) -> str:
__magic_name__ : Tuple = []
__magic_name__ : Union[str, Any] = None
__magic_name__ : Tuple = [constraint.copy(stateful=_A ) for constraint in self.constraints]
def __lowerCAmelCase ( self : int ) -> Optional[int]:
__magic_name__ : List[str] = 0
if self.inprogress_constraint:
# extra points for having a constraint mid-fulfilled
add += self.max_seqlen - self.inprogress_constraint.remaining()
return (len(self.complete_constraints ) * self.max_seqlen) + add
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : int = []
if self.inprogress_constraint is None:
for constraint in self.pending_constraints: # "pending" == "unfulfilled yet"
__magic_name__ : Union[str, Any] = constraint.advance()
if isinstance(_A , _A ):
token_list.append(_A )
elif isinstance(_A , _A ):
token_list.extend(_A )
else:
__magic_name__ : str = self.inprogress_constraint.advance()
if isinstance(_A , _A ):
token_list.append(_A )
elif isinstance(_A , _A ):
token_list.extend(_A )
if len(_A ) == 0:
return None
else:
return token_list
def __lowerCAmelCase ( self : int , _A : Optional[List[int]] ) -> str:
self.init_state()
if token_ids is not None:
for token in token_ids:
# completes or steps **one** constraint
__magic_name__ , __magic_name__ : Optional[Any] = self.add(_A )
# the entire list of constraints are fulfilled
if self.completed:
break
def __lowerCAmelCase ( self : Optional[Any] , _A : int ) -> Tuple:
if not isinstance(_A , _A ):
raise ValueError(F'`token_id` should be an `int`, but is `{token_id}`.' )
__magic_name__ , __magic_name__ : str = False, False
if self.completed:
__magic_name__ : List[Any] = True
__magic_name__ : str = False
return complete, stepped
if self.inprogress_constraint is not None:
# In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
# job, simply update the state
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = self.inprogress_constraint.update(_A )
if reset:
# 1. If the next token breaks the progress, then we must restart.
# e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
# But that doesn't mean we self.init_state(), since we only reset the state for this particular
# constraint, not the full list of constraints.
self.pending_constraints.append(self.inprogress_constraint.copy(stateful=_A ) )
__magic_name__ : int = None
if complete:
# 2. If the next token completes the constraint, move it to completed list, set
# inprogress to None. If there are no pending constraints either, then this full list of constraints
# is complete.
self.complete_constraints.append(self.inprogress_constraint )
__magic_name__ : Dict = None
if len(self.pending_constraints ) == 0:
# we're done!
__magic_name__ : str = True
else:
# Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
# of constraints?
for cidx, pending_constraint in enumerate(self.pending_constraints ):
if pending_constraint.does_advance(_A ):
__magic_name__ , __magic_name__ , __magic_name__ : Tuple = pending_constraint.update(_A )
if not stepped:
raise Exception(
'`constraint.update(token_id)` is not yielding incremental progress, '
'even though `constraint.does_advance(token_id)` is true.' )
if complete:
self.complete_constraints.append(_A )
__magic_name__ : Optional[int] = None
if not complete and stepped:
__magic_name__ : Any = pending_constraint
if complete or stepped:
# If we made any progress at all, then it's at least not a "pending constraint".
__magic_name__ : Union[str, Any] = (
self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
)
if len(self.pending_constraints ) == 0 and self.inprogress_constraint is None:
# If there's no longer any pending after this and no inprogress either, then we must be
# complete.
__magic_name__ : List[Any] = True
break # prevent accidentally stepping through multiple constraints with just one token.
return complete, stepped
def __lowerCAmelCase ( self : Union[str, Any] , _A : str=True ) -> str:
__magic_name__ : Union[str, Any] = ConstraintListState(self.constraints )  # we never actually touch the self.constraints
# objects throughout this process, so they stay in their initialization state.
if stateful:
__magic_name__ : List[str] = [
constraint.copy(stateful=_A ) for constraint in self.complete_constraints
]
if self.inprogress_constraint is not None:
__magic_name__ : Dict = self.inprogress_constraint.copy(stateful=_A )
__magic_name__ : Optional[Any] = [constraint.copy() for constraint in self.pending_constraints]
return new_state | 331 |
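# ---------------------------------------------------------------------------
# The class above tracks a list of constraints through pending / in-progress /
# complete buckets, where each constraint exposes does_advance() and update().
# Below is a minimal, self-contained sketch of that bookkeeping for a single
# phrasal constraint over token IDs. All names here are hypothetical
# illustrations, not the transformers API.
class PhrasalConstraintSketch:
    def __init__(self, token_ids):
        self.token_ids = token_ids
        self.fulfilled_idx = -1  # index of the last matched token

    def does_advance(self, token_id):
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id):
        # Returns (stepped, completed, reset), mirroring the triple above.
        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            completed = self.fulfilled_idx == len(self.token_ids) - 1
            return True, completed, False
        self.fulfilled_idx = -1  # wrong token: restart this constraint
        return False, False, True


_sketch = PhrasalConstraintSketch([5, 9, 2])
for _tok in [5, 9, 7, 5, 9, 2]:
    print(_tok, _sketch.update(_tok))  # the final token completes the phrase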
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
    AutoModelForSeq2SeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
MODEL_MODES = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
    '''summarization''': AutoModelForSeq2SeqLM,
    '''translation''': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class BaseTransformer ( pl.LightningModule ):
'''simple docstring'''
    def __init__( self , hparams : argparse.Namespace , num_labels : Any=None , mode : str="base" , config : Any=None , tokenizer : Any=None , model : Any=None , **config_kwargs : Any , ) -> None:
        super().__init__()
        # TODO: move to self.save_hyperparameters()
        # self.save_hyperparameters()
        # can also expand arguments into trainer signature for easier reading
        self.save_hyperparameters(hparams )
        self.step_count = 0
        self.output_dir = Path(self.hparams.output_dir )
        cache_dir = self.hparams.cache_dir if self.hparams.cache_dir else None
        if config is None:
            self.config = AutoConfig.from_pretrained(
                self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=cache_dir , **config_kwargs , )
        else:
            self.config : PretrainedConfig = config
        extra_model_params = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
        for p in extra_model_params:
            if getattr(self.hparams , p , None ):
                assert hasattr(self.config , p ), F'model config doesn\'t have a `{p}` attribute'
                setattr(self.config , p , getattr(self.hparams , p ) )
        if tokenizer is None:
            self.tokenizer = AutoTokenizer.from_pretrained(
                self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=cache_dir , )
        else:
            self.tokenizer : PreTrainedTokenizer = tokenizer
        self.model_type = MODEL_MODES[mode]
        if model is None:
            self.model = self.model_type.from_pretrained(
                self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=cache_dir , )
        else:
            self.model = model
    def load_hf_checkpoint(self , *args : Any , **kwargs : Any ) -> None:
        self.model = self.model_type.from_pretrained(*args , **kwargs )
    def get_lr_scheduler(self ):
        get_schedule_func = arg_to_scheduler[self.hparams.lr_scheduler]
        scheduler = get_schedule_func(
            self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
        scheduler = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
        return scheduler
    def configure_optimizers(self ):
        model = self.model
        no_decay = ['bias', 'LayerNorm.weight']
        optimizer_grouped_parameters = [
            {
                'params': [
                    p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
                ],  # check these named parameters
                'weight_decay': self.hparams.weight_decay,
            },
            {
                'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
                'weight_decay': 0.0,
            },
        ]
        if self.hparams.adafactor:
            optimizer = Adafactor(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , scale_parameter=False , relative_step=False )
        else:
            optimizer = AdamW(
                optimizer_grouped_parameters , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
        self.opt = optimizer
        scheduler = self.get_lr_scheduler()
        return [optimizer], [scheduler]
    def test_step(self , batch : Any , batch_nb : Any ):
        return self.validation_step(batch , batch_nb )
    def test_epoch_end(self , outputs : Any ):
        return self.validation_end(outputs )
    def total_steps(self ) -> int:
        num_devices = max(1 , self.hparams.gpus )  # TODO: consider num_tpu_cores
        effective_batch_size = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
        return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
    def setup(self , stage : Any ) -> None:
        if stage == "test":
            self.dataset_size = len(self.test_dataloader().dataset )
        else:
            self.train_loader = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=True )
            self.dataset_size = len(self.train_dataloader().dataset )
    def get_dataloader(self , type_path : str , batch_size : int , shuffle : bool = False ):
        raise NotImplementedError('You must implement this for your task' )
    def train_dataloader(self ):
        return self.train_loader
    def val_dataloader(self ):
        return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=False )
    def test_dataloader(self ):
        return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=False )
    def _feature_file(self , mode : str ) -> str:
        return os.path.join(
            self.hparams.data_dir , 'cached_{}_{}_{}'.format(
                mode , list(filter(None , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
    def on_save_checkpoint(self , checkpoint : Dict[str, Any] ) -> None:
        save_path = self.output_dir.joinpath('best_tfmr' )
        self.model.config.save_step = self.step_count
        self.model.save_pretrained(save_path )
        self.tokenizer.save_pretrained(save_path )
    @staticmethod
    def add_model_specific_args(parser : Any , root_dir : str ) -> None:
        parser.add_argument(
            '--model_name_or_path' , default=None , type=str , required=True , help='Path to pretrained model or model identifier from huggingface.co/models' , )
        parser.add_argument(
            '--config_name' , default='' , type=str , help='Pretrained config name or path if not the same as model_name' )
        parser.add_argument(
            '--tokenizer_name' , default=None , type=str , help='Pretrained tokenizer name or path if not the same as model_name' , )
        parser.add_argument(
            '--cache_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'cache' ) , type=str , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
        parser.add_argument(
            '--encoder_layerdrop' , type=float , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--decoder_layerdrop' , type=float , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--dropout' , type=float , help='Dropout probability (Optional). Goes into model.config' , )
        parser.add_argument(
            '--attention_dropout' , type=float , help='Attention dropout probability (Optional). Goes into model.config' , )
        parser.add_argument('--learning_rate' , default=5E-5 , type=float , help='The initial learning rate for Adam.' )
        parser.add_argument(
            '--lr_scheduler' , default='linear' , choices=arg_to_scheduler_choices , metavar=arg_to_scheduler_metavar , type=str , help='Learning rate scheduler' , )
        parser.add_argument('--weight_decay' , default=0.0 , type=float , help='Weight decay if we apply some.' )
        parser.add_argument('--adam_epsilon' , default=1E-8 , type=float , help='Epsilon for Adam optimizer.' )
        parser.add_argument('--warmup_steps' , default=0 , type=int , help='Linear warmup over warmup_steps.' )
        parser.add_argument('--num_workers' , default=4 , type=int , help='kwarg passed to DataLoader' )
        parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=int )
        parser.add_argument('--train_batch_size' , default=32 , type=int )
        parser.add_argument('--eval_batch_size' , default=32 , type=int )
        parser.add_argument('--adafactor' , action='store_true' )
class InitCallback ( pl.Callback ):
    '''simple docstring'''
    def on_sanity_check_start(self , trainer : Any , pl_module : Any ) -> None:
        if (
            trainer.is_global_zero and trainer.global_rank == 0
        ):  # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
            pl_module.model.rag.retriever.init_retrieval()  # better to use hook functions.
class CheckParamCallback ( pl.Callback ):
    '''simple docstring'''
    def on_after_backward(self , trainer : Any , pl_module : Any ) -> None:
        # print(pl_module.model.rag)
        for name, param in pl_module.model.rag.named_parameters():
            if param.grad is None:
                print(name )
class LoggingCallback ( pl.Callback ):
    '''simple docstring'''
    def on_batch_end(self , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> None:
        lr_scheduler = trainer.lr_schedulers[0]['scheduler']
        lrs = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
        pl_module.logger.log_metrics(lrs )
    def on_validation_end(self , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> None:
        rank_zero_info('***** Validation results *****' )
        metrics = trainer.callback_metrics
        # Log results
        for key in sorted(metrics ):
            if key not in ["log", "progress_bar"]:
                rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )
    def on_test_end(self , trainer : pl.Trainer , pl_module : pl.LightningModule ) -> None:
        rank_zero_info('***** Test results *****' )
        metrics = trainer.callback_metrics
        # Log and save results to file
        output_test_results_file = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
        with open(output_test_results_file , 'w' ) as writer:
            for key in sorted(metrics ):
                if key not in ["log", "progress_bar"]:
                    rank_zero_info('{} = {}\n'.format(key , str(metrics[key] ) ) )
                    writer.write('{} = {}\n'.format(key , str(metrics[key] ) ) )
def add_generic_args ( parser : argparse.ArgumentParser , root_dir : str ):
    """simple docstring"""
    parser.add_argument(
        '--output_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'model_checkpoints' ) , type=str , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=str , default='O2' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=int )
    parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=float , help='Max gradient norm' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
    parser.add_argument(
        '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--seed' , type=int , default=42 , help='random seed for initialization' )
    parser.add_argument(
        '--data_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'dummy-train-data' ) , type=str , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def generic_train ( model : BaseTransformer , args : argparse.Namespace , early_stopping_callback : Any=None , logger : Any=True , extra_callbacks : Any=[] , checkpoint_callback : Any=None , logging_callback : Any=None , **extra_train_kwargs : Any , ):
    """simple docstring"""
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['precision'] = 16
    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'
    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print('RAG modeling tests with new set functions successfully executed!' )
    return trainer
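# ---------------------------------------------------------------------------
# The total_steps() method above derives the number of optimizer steps from
# the dataset size and the effective batch size. A tiny stand-alone sketch of
# that arithmetic (illustrative names, not part of the module's API):
def total_training_steps(dataset_size, train_batch_size, accumulate_grad_batches, num_devices, max_epochs):
    effective_batch_size = train_batch_size * accumulate_grad_batches * num_devices
    return (dataset_size / effective_batch_size) * max_epochs


# e.g. 10_000 examples, batch 32, grad accumulation 2, one GPU, 3 epochs:
print(total_training_steps(10_000, 32, 2, 1, 3))  # -> 468.75 optimizer steps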
'''simple docstring'''
import unittest
from transformers import load_tool
from transformers.utils import is_torch_available
if is_torch_available():
import torch
from transformers.testing_utils import require_torch
from .test_tools_common import ToolTesterMixin
@require_torch
class _lowerCamelCase ( unittest.TestCase , ToolTesterMixin ):
'''simple docstring'''
    def setUp(self ) -> None:
        self.tool = load_tool('text-to-speech' )
        self.tool.setup()
    def test_exact_match_arg(self ) -> None:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
    def test_exact_match_kwarg(self ) -> None:
        # SpeechT5 isn't deterministic
        torch.manual_seed(0 )
        result = self.tool('hey' )
        resulting_tensor = result.to_raw()
        self.assertTrue(
            torch.allclose(
                resulting_tensor[:3] , torch.tensor([-0.000_5966_6688_3211_5829, -0.000_3657_6401_9079_5064, -0.0001_3439_5027_9988_3485] ) , ) )
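# ---------------------------------------------------------------------------
# Both tests above pin the RNG with torch.manual_seed and then compare a small
# slice of the output against hard-coded reference values. A minimal sketch of
# that regression-check pattern (assumes torch is installed):
import torch

torch.manual_seed(0)
output = torch.randn(100)        # stand-in for a model's raw output
reference = output[:3].clone()   # the values a real test would hard-code

torch.manual_seed(0)
rerun = torch.randn(100)         # same seed -> identical "model" output
assert torch.allclose(rerun[:3], reference)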
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( SchedulerCommonTest ):
'''simple docstring'''
    scheduler_classes = (DDPMScheduler,)
    def get_scheduler_config(self , **kwargs ) -> dict:
        config = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
        config.update(**kwargs )
        return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _A , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
            scheduler.set_timesteps(timesteps=_A )
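# ---------------------------------------------------------------------------
# The full-loop tests above repeatedly call scheduler.step(...).prev_sample.
# For the "epsilon" prediction type, that step is built around the DDPM
# posterior-mean formula sketched below in NumPy (noise term omitted for
# brevity; this is an illustration, not the diffusers implementation):
import numpy as np

T = 1000
betas = np.linspace(1E-4, 0.02, T)   # matches beta_start/beta_end above
alphas = 1.0 - betas
alphas_cumprod = np.cumprod(alphas)


def ddpm_prev_mean(x_t, eps_pred, t):
    # mu_{t-1} = (x_t - beta_t / sqrt(1 - alpha_bar_t) * eps) / sqrt(alpha_t)
    coef = betas[t] / np.sqrt(1.0 - alphas_cumprod[t])
    return (x_t - coef * eps_pred) / np.sqrt(alphas[t])


x = np.random.randn(4)
print(ddpm_prev_mean(x, np.random.randn(4), t=999).shape)  # (4,)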
'''simple docstring'''
import random
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
    UNet3DConditionModel,
VideoToVideoSDPipeline,
)
from diffusers.utils import floats_tensor, is_xformers_available, skip_mps
from diffusers.utils.testing_utils import enable_full_determinism, slow, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_VARIATION_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class _lowerCamelCase ( PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = VideoToVideoSDPipeline
A_ : int = TEXT_GUIDED_IMAGE_VARIATION_PARAMS.union({"""video"""} ) - {"""image""", """width""", """height"""}
A_ : Optional[Any] = TEXT_GUIDED_IMAGE_VARIATION_BATCH_PARAMS.union({"""video"""} ) - {"""image"""}
A_ : Optional[Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
A_ : List[Any] = False
# No `output_type`.
A_ : str = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
    def get_dummy_components(self ):
        torch.manual_seed(0 )
        unet = UNet3DConditionModel(
            block_out_channels=(32, 64, 64, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'CrossAttnDownBlock3D', 'DownBlock3D') , up_block_types=('UpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D', 'CrossAttnUpBlock3D') , cross_attention_dim=32 , attention_head_dim=4 , )
        scheduler = DDIMScheduler(
            beta_start=0.0_0085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=False , set_alpha_to_one=False , )
        torch.manual_seed(0 )
        vae = AutoencoderKL(
            block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
        torch.manual_seed(0 )
        text_encoder_config = CLIPTextConfig(
            bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-05 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
        text_encoder = CLIPTextModel(text_encoder_config )
        tokenizer = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
        components = {
            'unet': unet,
            'scheduler': scheduler,
            'vae': vae,
            'text_encoder': text_encoder,
            'tokenizer': tokenizer,
        }
        return components
    def get_dummy_inputs(self , device , seed=0 ):
        # 3 frames
        video = floats_tensor((1, 3, 3, 32, 32) , rng=random.Random(seed ) ).to(device )
        if str(device ).startswith('mps' ):
            generator = torch.manual_seed(seed )
        else:
            generator = torch.Generator(device=device ).manual_seed(seed )
        inputs = {
            'prompt': 'A painting of a squirrel eating a burger',
            'video': video,
            'generator': generator,
            'num_inference_steps': 2,
            'guidance_scale': 6.0,
            'output_type': 'pt',
        }
        return inputs
def __lowerCAmelCase ( self : Any ) -> List[Any]:
__magic_name__ : Any = 'cpu' # ensure determinism for the device-dependent torch.Generator
__magic_name__ : Any = self.get_dummy_components()
__magic_name__ : Optional[Any] = VideoToVideoSDPipeline(**_A )
__magic_name__ : Optional[Any] = sd_pipe.to(_A )
sd_pipe.set_progress_bar_config(disable=_A )
__magic_name__ : List[str] = self.get_dummy_inputs(_A )
__magic_name__ : List[str] = 'np'
__magic_name__ : List[Any] = sd_pipe(**_A ).frames
__magic_name__ : Optional[int] = frames[0][-3:, -3:, -1]
assert frames[0].shape == (32, 32, 3)
__magic_name__ : Union[str, Any] = np.array([106, 117, 113, 174, 137, 112, 148, 151, 131] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1E-2
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : str ) -> Optional[int]:
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_A , expected_max_diff=5E-3 )
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowerCAmelCase ( self : Any ) -> List[str]:
pass
@unittest.skip(reason='Batching needs to be properly figured out first for this pipeline.' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
pass
@unittest.skip(reason='`num_images_per_prompt` argument is not supported for this pipeline.' )
def __lowerCAmelCase ( self : str ) -> Tuple:
pass
def __lowerCAmelCase ( self : str ) -> int:
return super().test_progress_bar()
@slow
@skip_mps
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] ) -> List[Any]:
        pipe = VideoToVideoSDPipeline.from_pretrained('cerspense/zeroscope_v2_XL' , torch_dtype=torch.float16 )
pipe.enable_model_cpu_offload()
# 10 frames
__magic_name__ : Tuple = torch.Generator(device='cpu' ).manual_seed(0 )
__magic_name__ : List[str] = torch.randn((1, 10, 3, 1024, 576) , generator=_A )
__magic_name__ : List[str] = video.to('cuda' )
__magic_name__ : Union[str, Any] = 'Spiderman is surfing'
__magic_name__ : Optional[int] = pipe(_A , video=_A , generator=_A , num_inference_steps=3 , output_type='pt' ).frames
__magic_name__ : Optional[int] = np.array([-1.045_8984, -1.127_9297, -0.966_3086, -0.9150_3906, -0.7509_7656] )
        assert np.abs(video_frames.cpu().numpy()[0, 0, 0, 0, -5:] - expected_array ).sum() < 1E-2
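# ---------------------------------------------------------------------------
# The slow test above works on video tensors laid out as (batch, frames,
# channels, height, width) and fingerprints the output by slicing a handful of
# stable values. A small NumPy sketch of that expected-slice pattern:
import numpy as np

frames = np.random.rand(1, 10, 3, 64, 64)  # (batch, frames, channels, H, W)
fingerprint = frames[0, 0, 0, 0, -5:]      # last five values of one pixel row
expected = fingerprint.copy()              # a real test hard-codes these numbers
assert np.abs(fingerprint - expected).sum() < 1E-2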
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCamelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    pipeline_class = IFInpaintingPipeline
    params = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
    batch_params = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
    required_optional_params = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
return self._get_dummy_components()
def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]:
if str(_A ).startswith('mps' ):
__magic_name__ : Optional[Any] = torch.manual_seed(_A )
else:
__magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __lowerCAmelCase ( self : Dict ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
        super().test_save_load_float16(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self : Tuple ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
self._test_save_load_local()
def __lowerCAmelCase ( self : Any ) -> int:
self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
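# ---------------------------------------------------------------------------
# get_dummy_inputs above picks its torch.Generator depending on the device,
# because MPS historically lacked per-device generator support. A sketch of
# that helper in isolation (assumes torch is installed):
import torch


def make_generator(device, seed=0):
    if str(device).startswith('mps'):
        return torch.manual_seed(seed)  # fall back to the global generator
    return torch.Generator(device=device).manual_seed(seed)


print(torch.randn(2, generator=make_generator('cpu')))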
'''simple docstring'''
import copy
import os
from typing import TYPE_CHECKING, List, Union
if TYPE_CHECKING:
pass
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase :Optional[Any] = logging.get_logger(__name__)
lowerCAmelCase :Any = {
'''kakaobrain/align-base''': '''https://huggingface.co/kakaobrain/align-base/resolve/main/config.json''',
}
class AlignTextConfig ( PretrainedConfig ):
'''simple docstring'''
A_ : List[str] = """align_text_model"""
    def __init__( self , vocab_size : int=30522 , hidden_size : int=768 , num_hidden_layers : int=12 , num_attention_heads : int=12 , intermediate_size : int=3072 , hidden_act : str="gelu" , hidden_dropout_prob : float=0.1 , attention_probs_dropout_prob : float=0.1 , max_position_embeddings : int=512 , type_vocab_size : int=2 , initializer_range : float=0.02 , layer_norm_eps : float=1E-12 , pad_token_id : int=0 , position_embedding_type : str="absolute" , use_cache : bool=True , **kwargs , ) -> None:
        super().__init__(**kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
        self.pad_token_id = pad_token_id
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the text config dict if we are loading from AlignConfig
        if config_dict.get('model_type' ) == "align":
            config_dict = config_dict['text_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class AlignVisionConfig ( PretrainedConfig ):
'''simple docstring'''
A_ : List[Any] = """align_vision_model"""
    def __init__( self , num_channels : int = 3 , image_size : int = 600 , width_coefficient : float = 2.0 , depth_coefficient : float = 3.1 , depth_divisor : int = 8 , kernel_sizes : List[int] = [3, 3, 5, 3, 5, 5, 3] , in_channels : List[int] = [32, 16, 24, 40, 80, 112, 192] , out_channels : List[int] = [16, 24, 40, 80, 112, 192, 320] , depthwise_padding : List[int] = [] , strides : List[int] = [1, 2, 2, 2, 1, 2, 1] , num_block_repeats : List[int] = [1, 2, 2, 3, 3, 4, 1] , expand_ratios : List[int] = [1, 6, 6, 6, 6, 6, 6] , squeeze_expansion_ratio : float = 0.25 , hidden_act : str = "swish" , hidden_dim : int = 2560 , pooling_type : str = "mean" , initializer_range : float = 0.02 , batch_norm_eps : float = 0.001 , batch_norm_momentum : float = 0.99 , drop_connect_rate : float = 0.2 , **kwargs , ):
        super().__init__(**kwargs )
        self.num_channels = num_channels
        self.image_size = image_size
        self.width_coefficient = width_coefficient
        self.depth_coefficient = depth_coefficient
        self.depth_divisor = depth_divisor
        self.kernel_sizes = kernel_sizes
        self.in_channels = in_channels
        self.out_channels = out_channels
        self.depthwise_padding = depthwise_padding
        self.strides = strides
        self.num_block_repeats = num_block_repeats
        self.expand_ratios = expand_ratios
        self.squeeze_expansion_ratio = squeeze_expansion_ratio
        self.hidden_act = hidden_act
        self.hidden_dim = hidden_dim
        self.pooling_type = pooling_type
        self.initializer_range = initializer_range
        self.batch_norm_eps = batch_norm_eps
        self.batch_norm_momentum = batch_norm_momentum
        self.drop_connect_rate = drop_connect_rate
        self.num_hidden_layers = sum(num_block_repeats ) * 4
    @classmethod
    def from_pretrained( cls , pretrained_model_name_or_path : Union[str, os.PathLike] , **kwargs ) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs )
        config_dict , kwargs = cls.get_config_dict(pretrained_model_name_or_path , **kwargs )
        # get the vision config dict if we are loading from AlignConfig
        if config_dict.get('model_type' ) == "align":
            config_dict = config_dict['vision_config']
        if "model_type" in config_dict and hasattr(cls , 'model_type' ) and config_dict["model_type"] != cls.model_type:
            logger.warning(
                F'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
                F'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
        return cls.from_dict(config_dict , **kwargs )
class AlignConfig ( PretrainedConfig ):
'''simple docstring'''
A_ : Optional[Any] = """align"""
A_ : int = True
    def __init__( self , text_config=None , vision_config=None , projection_dim=640 , temperature_init_value=1.0 , initializer_range=0.02 , **kwargs , ):
        super().__init__(**kwargs )
        if text_config is None:
            text_config = {}
            logger.info('text_config is None. Initializing the AlignTextConfig with default values.' )
        if vision_config is None:
            vision_config = {}
            logger.info('vision_config is None. Initializing the AlignVisionConfig with default values.' )
        self.text_config = AlignTextConfig(**text_config )
        self.vision_config = AlignVisionConfig(**vision_config )
        self.projection_dim = projection_dim
        self.temperature_init_value = temperature_init_value
        self.initializer_range = initializer_range
    @classmethod
    def from_text_vision_configs( cls , text_config : AlignTextConfig , vision_config : AlignVisionConfig , **kwargs ):
        return cls(text_config=text_config.to_dict() , vision_config=vision_config.to_dict() , **kwargs )
    def to_dict(self ):
        output = copy.deepcopy(self.__dict__ )
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output
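# ---------------------------------------------------------------------------
# AlignConfig above composes two sub-configs and serializes them by deep-
# copying its __dict__ and replacing each sub-config with its dict form. A
# plain-Python sketch of that pattern (stand-in classes, not transformers):
import copy


class SubConfigSketch:
    def __init__(self, hidden_size=768):
        self.hidden_size = hidden_size

    def to_dict(self):
        return copy.deepcopy(self.__dict__)


class CompositeConfigSketch:
    model_type = 'composite'

    def __init__(self):
        self.text_config = SubConfigSketch(768)
        self.vision_config = SubConfigSketch(1024)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output['text_config'] = self.text_config.to_dict()
        output['vision_config'] = self.vision_config.to_dict()
        output['model_type'] = self.__class__.model_type
        return output


print(CompositeConfigSketch().to_dict())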
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester ( object ):
'''simple docstring'''
def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Optional[Any] = use_input_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : str = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : str = pos_att_type
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : int = None
if self.use_token_type_ids:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.get_config()
__magic_name__ : Union[str, Any] = 300
return config
def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]:
__magic_name__ : Dict = DebertaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0]
__magic_name__ : List[str] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict:
__magic_name__ : List[str] = DebertaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]:
__magic_name__ : str = self.num_labels
__magic_name__ : int = DebertaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : int = DebertaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class _lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = True
A_ : Any = False
A_ : Dict = False
A_ : str = False
A_ : Dict = False
    def setUp(self ) -> None:
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int = DebertaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
__magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
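# ---------------------------------------------------------------------------
# The model tester above builds random token-ID tensors with an ids_tensor
# helper. A minimal equivalent (assumes torch; the real helper in the test
# utilities differs in details):
import torch


def ids_tensor_sketch(shape, vocab_size):
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)


print(ids_tensor_sketch([13, 7], vocab_size=99).shape)  # torch.Size([13, 7])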
'''simple docstring'''
from itertools import product
def total_frequency_distribution ( sides_number : int , dice_number : int ):
    """simple docstring"""
    max_face_number = sides_number
    max_total = max_face_number * dice_number
    totals_frequencies = [0] * (max_total + 1)
    min_face_number = 1
    faces_numbers = range(min_face_number , max_face_number + 1 )
    for dice_numbers in product(faces_numbers , repeat=dice_number ):
        total = sum(dice_numbers )
        totals_frequencies[total] += 1
    return totals_frequencies
def solution ( ):
    """simple docstring"""
    peter_totals_frequencies = total_frequency_distribution(
        sides_number=4 , dice_number=9 )
    colin_totals_frequencies = total_frequency_distribution(
        sides_number=6 , dice_number=6 )
    peter_wins_count = 0
    min_peter_total = 9
    max_peter_total = 4 * 9
    min_colin_total = 6
    for peter_total in range(min_peter_total , max_peter_total + 1 ):
        peter_wins_count += peter_totals_frequencies[peter_total] * sum(
            colin_totals_frequencies[min_colin_total:peter_total] )
    total_games_number = (4**9) * (6**6)
    peter_win_probability = peter_wins_count / total_games_number
    rounded_peter_win_probability = round(peter_win_probability , ndigits=7 )
    return rounded_peter_win_probability
if __name__ == "__main__":
    print(F'{solution() = }')
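# ---------------------------------------------------------------------------
# A quick Monte Carlo cross-check of the exact convolution above (hypothetical
# helper, not part of the Project Euler solution itself):
import random


def simulate(trials=100_000):
    wins = 0
    for _ in range(trials):
        peter = sum(random.randint(1, 4) for _ in range(9))
        colin = sum(random.randint(1, 6) for _ in range(6))
        wins += peter > colin
    return wins / trials


print(simulate())  # fluctuates around the exact answer of 0.5731441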
'''simple docstring'''
class Graph : # Public class to implement a graph
    '''simple docstring'''
    def __init__( self , row : int , col : int , graph : list[list[bool]] ) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i : int , j : int , visited : list[list[bool]] ) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs( self , i : int , j : int , visited : list[list[bool]] ) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count
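# ---------------------------------------------------------------------------
# Example usage of the Graph class above on a small 0/1 grid. With the
# 8-directional connectivity implemented in diffs(), this classic grid
# contains exactly five islands:
grid = [
    [1, 1, 0, 0, 0],
    [0, 1, 0, 0, 1],
    [1, 0, 0, 1, 1],
    [0, 0, 0, 0, 0],
    [1, 0, 1, 0, 1],
]
print(Graph(row=5, col=5, graph=grid).count_islands())  # -> 5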
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase :Tuple = {
'''configuration_time_series_transformer''': [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP''',
'''TimeSeriesTransformerConfig''',
],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = [
'''TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TimeSeriesTransformerForPrediction''',
'''TimeSeriesTransformerModel''',
'''TimeSeriesTransformerPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP,
TimeSeriesTransformerConfig,
)
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_time_series_transformer import (
TIME_SERIES_TRANSFORMER_PRETRAINED_MODEL_ARCHIVE_LIST,
TimeSeriesTransformerForPrediction,
TimeSeriesTransformerModel,
TimeSeriesTransformerPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
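# ---------------------------------------------------------------------------
# The __init__ above defers heavy imports via _LazyModule. A minimal stand-in
# for that lazy pattern using importlib (illustrative only, not the
# transformers implementation):
import importlib


class LazyNamespaceSketch:
    def __init__(self, name_to_module):
        self._map = name_to_module

    def __getattr__(self, name):
        module = importlib.import_module(self._map[name])  # import on demand
        return getattr(module, name)


ns = LazyNamespaceSketch({'sqrt': 'math'})
print(ns.sqrt(9.0))  # math is only imported on first attribute access -> 3.0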
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__)
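# ---------------------------------------------------------------------------
# Both __init__ files above guard optional dependencies with a try/except
# around an availability check. A sketch of that probing idea with
# importlib.util.find_spec (hypothetical names):
import importlib.util


def is_available(package):
    return importlib.util.find_spec(package) is not None


_import_structure = {'processing': ['Processor']}
if is_available('sentencepiece'):  # register tokenizer names only if usable
    _import_structure['tokenization'] = ['Tokenizer']
print(_import_structure)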
'''simple docstring'''
import os
import tempfile
import unittest
from pathlib import Path
from transformers import AutoConfig, is_torch_available
from transformers.testing_utils import require_torch, torch_device
if is_torch_available():
from transformers import PyTorchBenchmark, PyTorchBenchmarkArguments
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int , _A : List[Any] ) -> Any:
for model_result in results.values():
for batch_size, sequence_length in zip(model_result['bs'] , model_result['ss'] ):
__magic_name__ : Union[str, Any] = model_result['result'][batch_size][sequence_length]
self.assertIsNotNone(_A )
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
__magic_name__ : Tuple = 'sshleifer/tiny-gpt2'
__magic_name__ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__magic_name__ : Tuple = PyTorchBenchmark(_A )
__magic_name__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : List[Any] ) -> str:
__magic_name__ : List[str] = 'sgugger/tiny-distilbert-classification'
__magic_name__ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , only_pretrain_model=_A , )
__magic_name__ : List[Any] = PyTorchBenchmark(_A )
__magic_name__ : Dict = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
__magic_name__ : List[Any] = 'sshleifer/tiny-gpt2'
__magic_name__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , torchscript=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__magic_name__ : int = PyTorchBenchmark(_A )
__magic_name__ : Union[str, Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
@unittest.skipIf(torch_device == 'cpu' , 'Cant do half precision' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
__magic_name__ : Optional[int] = 'sshleifer/tiny-gpt2'
__magic_name__ : Optional[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_A , inference=_A , fp16=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__magic_name__ : Any = PyTorchBenchmark(_A )
__magic_name__ : int = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : List[str] = 'sshleifer/tiny-gpt2'
__magic_name__ : Optional[int] = AutoConfig.from_pretrained(_A )
# set architectures equal to `None`
__magic_name__ : List[Any] = None
__magic_name__ : Any = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__magic_name__ : List[Any] = PyTorchBenchmark(_A , configs=[config] )
__magic_name__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : List[str] ) -> int:
__magic_name__ : Any = 'sshleifer/tiny-gpt2'
__magic_name__ : Tuple = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__magic_name__ : Optional[int] = PyTorchBenchmark(_A )
__magic_name__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
@unittest.skipIf(torch_device == 'cpu' , 'Can\'t do half precision' )
def __lowerCAmelCase ( self : str ) -> Dict:
__magic_name__ : Dict = 'sshleifer/tiny-gpt2'
__magic_name__ : Optional[Any] = PyTorchBenchmarkArguments(
            models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , fp16=_A , multi_process=_A , )
__magic_name__ : Any = PyTorchBenchmark(_A )
__magic_name__ : Optional[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self : Tuple ) -> Dict:
__magic_name__ : Optional[int] = 'sshleifer/tiny-gpt2'
__magic_name__ : List[Any] = AutoConfig.from_pretrained(_A )
__magic_name__ : str = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__magic_name__ : str = PyTorchBenchmark(_A , configs=[config] )
__magic_name__ : List[str] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : Dict = 'sshleifer/tinier_bart'
__magic_name__ : int = AutoConfig.from_pretrained(_A )
__magic_name__ : List[Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__magic_name__ : Optional[Any] = PyTorchBenchmark(_A , configs=[config] )
__magic_name__ : Optional[int] = benchmark.run()
self.check_results_dict_not_empty(results.time_inference_result )
self.check_results_dict_not_empty(results.memory_inference_result )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
__magic_name__ : Tuple = 'sshleifer/tiny-gpt2'
__magic_name__ : Optional[int] = AutoConfig.from_pretrained(_A )
__magic_name__ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__magic_name__ : Tuple = PyTorchBenchmark(_A , configs=[config] )
__magic_name__ : List[Any] = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Dict = 'sshleifer/tinier_bart'
__magic_name__ : Optional[int] = AutoConfig.from_pretrained(_A )
__magic_name__ : Union[str, Any] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , multi_process=_A , )
__magic_name__ : int = PyTorchBenchmark(_A , configs=[config] )
__magic_name__ : str = benchmark.run()
self.check_results_dict_not_empty(results.time_train_result )
self.check_results_dict_not_empty(results.memory_train_result )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : Optional[int] = 'sshleifer/tiny-gpt2'
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[int] = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , save_to_csv=_A , sequence_lengths=[8] , batch_sizes=[1] , inference_time_csv_file=os.path.join(_A , 'inf_time.csv' ) , train_memory_csv_file=os.path.join(_A , 'train_mem.csv' ) , inference_memory_csv_file=os.path.join(_A , 'inf_mem.csv' ) , train_time_csv_file=os.path.join(_A , 'train_time.csv' ) , env_info_csv_file=os.path.join(_A , 'env.csv' ) , multi_process=_A , )
__magic_name__ : int = PyTorchBenchmark(_A )
benchmark.run()
self.assertTrue(Path(os.path.join(_A , 'inf_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_A , 'train_time.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_A , 'inf_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_A , 'train_mem.csv' ) ).exists() )
self.assertTrue(Path(os.path.join(_A , 'env.csv' ) ).exists() )
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = 'sshleifer/tiny-gpt2'
def _check_summary_is_not_empty(_A : Dict ):
self.assertTrue(hasattr(_A , 'sequential' ) )
self.assertTrue(hasattr(_A , 'cumulative' ) )
self.assertTrue(hasattr(_A , 'current' ) )
self.assertTrue(hasattr(_A , 'total' ) )
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : int = PyTorchBenchmarkArguments(
models=[MODEL_ID] , training=_A , inference=_A , sequence_lengths=[8] , batch_sizes=[1] , log_filename=os.path.join(_A , 'log.txt' ) , log_print=_A , trace_memory_line_by_line=_A , multi_process=_A , )
__magic_name__ : List[str] = PyTorchBenchmark(_A )
__magic_name__ : Optional[Any] = benchmark.run()
_check_summary_is_not_empty(result.inference_summary )
_check_summary_is_not_empty(result.train_summary )
self.assertTrue(Path(os.path.join(_A , 'log.txt' ) ).exists() ) | 331 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target: int = 200_0000 ) -> int:
    """simple docstring"""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
print(F'{solution() = }') | 331 | 1 |
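The quadratic-formula estimate used in the loop above can be checked in isolation. The sketch below is standalone; the helper name `triangle` and the sample grid side 36 are illustrative, not from the original file. It reproduces the well-known near miss of 1,999,998 for the 36 by 77 grid:

from math import sqrt

def triangle(n: int) -> int:
    return n * (n + 1) // 2

target = 2_000_000
a = 36
# solve T(a) * T(b) = target for b, where T(n) = n * (n + 1) / 2
b_estimate = (-1 + sqrt(1 + 8 * target / triangle(a))) / 2
print(triangle(a) * triangle(round(b_estimate)))  # 1999998, just short of the target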
'''simple docstring'''
import json
import os
from functools import lru_cache
from typing import Dict, List, Optional, Tuple, Union
import regex as re
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...tokenization_utils_base import BatchEncoding, EncodedInput
from ...utils import PaddingStrategy, logging
lowerCAmelCase :int = logging.get_logger(__name__)
lowerCAmelCase :Any = {'''vocab_file''': '''vocab.json''', '''merges_file''': '''merges.txt'''}
# See all LED models at https://huggingface.co/models?filter=LED
lowerCAmelCase :List[str] = {
'''vocab_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/vocab.json''',
},
'''merges_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/merges.txt''',
},
'''tokenizer_file''': {
'''allenai/led-base-16384''': '''https://huggingface.co/allenai/led-base-16384/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase :Optional[int] = {
'''allenai/led-base-16384''': 1_6_3_8_4,
}
@lru_cache()
# Copied from transformers.models.bart.tokenization_bart.bytes_to_unicode
def bytes_to_unicode ( ):
    """simple docstring"""
    bs = (
        list(range(ord('!' ) , ord('~' ) + 1 ) ) + list(range(ord('¡' ) , ord('¬' ) + 1 ) ) + list(range(ord('®' ) , ord('ÿ' ) + 1 ) )
    )
    cs = bs[:]
    n = 0
    for b in range(2**8 ):
        if b not in bs:
            bs.append(b )
            cs.append(2**8 + n )
            n += 1
    cs = [chr(n ) for n in cs]
    return dict(zip(bs , cs ) )
def get_pairs ( word ):
    """simple docstring"""
    pairs = set()
    prev_char = word[0]
    for char in word[1:]:
        pairs.add((prev_char, char) )
        prev_char = char
    return pairs
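# Quick illustration (added commentary, not part of the original module): the
# byte encoder above is a reversible 256-entry map in which printable bytes
# map to themselves, and get_pairs enumerates adjacent symbol pairs:
#     byte_encoder = bytes_to_unicode()
#     assert len(byte_encoder) == 256 and byte_encoder[ord('!')] == '!'
#     assert get_pairs(('l', 'o', 'w')) == {('l', 'o'), ('o', 'w')}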
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = VOCAB_FILES_NAMES
A_ : Dict = PRETRAINED_VOCAB_FILES_MAP
A_ : int = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Optional[Any] = ["""input_ids""", """attention_mask"""]
def __init__( self : str , _A : Optional[Any] , _A : int , _A : int="replace" , _A : Union[str, Any]="<s>" , _A : Union[str, Any]="</s>" , _A : Any="</s>" , _A : Optional[Any]="<s>" , _A : List[Any]="<unk>" , _A : List[Any]="<pad>" , _A : Union[str, Any]="<mask>" , _A : str=False , **_A : Optional[int] , ) -> Optional[Any]:
__magic_name__ : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else bos_token
__magic_name__ : Union[str, Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else eos_token
__magic_name__ : Tuple = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else sep_token
__magic_name__ : str = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else cls_token
__magic_name__ : Optional[Any] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else unk_token
__magic_name__ : Dict = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else pad_token
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Optional[int] = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
errors=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , cls_token=_A , pad_token=_A , mask_token=_A , add_prefix_space=_A , **_A , )
with open(_A , encoding='utf-8' ) as vocab_handle:
__magic_name__ : Optional[Any] = json.load(_A )
__magic_name__ : str = {v: k for k, v in self.encoder.items()}
__magic_name__ : int = errors # how to handle errors in decoding
__magic_name__ : List[Any] = bytes_to_unicode()
__magic_name__ : Union[str, Any] = {v: k for k, v in self.byte_encoder.items()}
with open(_A , encoding='utf-8' ) as merges_handle:
__magic_name__ : List[Any] = merges_handle.read().split('\n' )[1:-1]
__magic_name__ : List[Any] = [tuple(merge.split() ) for merge in bpe_merges]
__magic_name__ : str = dict(zip(_A , range(len(_A ) ) ) )
__magic_name__ : List[Any] = {}
__magic_name__ : int = add_prefix_space
# Should have added re.IGNORECASE so BPE merges can happen for capitalized versions of contractions
__magic_name__ : str = re.compile(R'\'s|\'t|\'re|\'ve|\'m|\'ll|\'d| ?\p{L}+| ?\p{N}+| ?[^\s\p{L}\p{N}]+|\s+(?!\S)|\s+' )
@property
# Copied from transformers.models.bart.tokenization_bart.BartTokenizer.vocab_size
def __lowerCAmelCase ( self : Any ) -> Any:
return len(self.encoder )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
return dict(self.encoder , **self.added_tokens_encoder )
def __lowerCAmelCase ( self : Optional[int] , _A : List[str] ) -> Dict:
if token in self.cache:
return self.cache[token]
__magic_name__ : List[Any] = tuple(_A )
__magic_name__ : List[str] = get_pairs(_A )
if not pairs:
return token
while True:
__magic_name__ : Optional[Any] = min(_A , key=lambda _A : self.bpe_ranks.get(_A , float('inf' ) ) )
if bigram not in self.bpe_ranks:
break
__magic_name__ , __magic_name__ : Optional[Any] = bigram
__magic_name__ : Union[str, Any] = []
__magic_name__ : Union[str, Any] = 0
while i < len(_A ):
try:
__magic_name__ : Dict = word.index(_A , _A )
except ValueError:
new_word.extend(word[i:] )
break
else:
new_word.extend(word[i:j] )
__magic_name__ : Union[str, Any] = j
if word[i] == first and i < len(_A ) - 1 and word[i + 1] == second:
new_word.append(first + second )
i += 2
else:
new_word.append(word[i] )
i += 1
__magic_name__ : str = tuple(_A )
__magic_name__ : List[str] = new_word
if len(_A ) == 1:
break
else:
__magic_name__ : int = get_pairs(_A )
__magic_name__ : Tuple = ' '.join(_A )
__magic_name__ : Union[str, Any] = word
return word
def __lowerCAmelCase ( self : Union[str, Any] , _A : List[str] ) -> str:
__magic_name__ : List[str] = []
for token in re.findall(self.pat , _A ):
__magic_name__ : int = ''.join(
self.byte_encoder[b] for b in token.encode('utf-8' ) ) # Maps all our bytes to unicode strings, avoiding control tokens of the BPE (spaces in our case)
bpe_tokens.extend(bpe_token for bpe_token in self.bpe(_A ).split(' ' ) )
return bpe_tokens
def __lowerCAmelCase ( self : Optional[int] , _A : Dict ) -> List[Any]:
return self.encoder.get(_A , self.encoder.get(self.unk_token ) )
def __lowerCAmelCase ( self : Dict , _A : Dict ) -> Any:
return self.decoder.get(_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[Any] ) -> Tuple:
__magic_name__ : Dict = ''.join(_A )
__magic_name__ : Union[str, Any] = bytearray([self.byte_decoder[c] for c in text] ).decode('utf-8' , errors=self.errors )
return text
def __lowerCAmelCase ( self : int , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : Any = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
__magic_name__ : Dict = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['merges_file'] )
with open(_A , 'w' , encoding='utf-8' ) as f:
f.write(json.dumps(self.encoder , indent=2 , sort_keys=_A , ensure_ascii=_A ) + '\n' )
__magic_name__ : str = 0
with open(_A , 'w' , encoding='utf-8' ) as writer:
writer.write('#version: 0.2\n' )
            for bpe_tokens, token_index in sorted(self.bpe_ranks.items() , key=lambda kv : kv[1] ):
if index != token_index:
logger.warning(
F'Saving vocabulary to {merge_file}: BPE merge indices are not consecutive.'
' Please check that the tokenizer is not corrupted!' )
__magic_name__ : Tuple = token_index
writer.write(' '.join(_A ) + '\n' )
index += 1
return vocab_file, merge_file
def __lowerCAmelCase ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
__magic_name__ : Any = [self.cls_token_id]
__magic_name__ : Optional[Any] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def __lowerCAmelCase ( self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None , _A : bool = False ) -> List[int]:
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_A , token_ids_a=_A , already_has_special_tokens=_A )
if token_ids_a is None:
return [1] + ([0] * len(_A )) + [1]
return [1] + ([0] * len(_A )) + [1, 1] + ([0] * len(_A )) + [1]
def __lowerCAmelCase ( self : str , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
__magic_name__ : List[str] = [self.sep_token_id]
__magic_name__ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
def __lowerCAmelCase ( self : int , _A : Any , _A : Any=False , **_A : List[str] ) -> List[str]:
__magic_name__ : int = kwargs.pop('add_prefix_space' , self.add_prefix_space )
if (is_split_into_words or add_prefix_space) and (len(_A ) > 0 and not text[0].isspace()):
__magic_name__ : Union[str, Any] = ' ' + text
return (text, kwargs)
def __lowerCAmelCase ( self : Any , _A : Union[Dict[str, EncodedInput], BatchEncoding] , _A : Optional[int] = None , _A : PaddingStrategy = PaddingStrategy.DO_NOT_PAD , _A : Optional[int] = None , _A : Optional[bool] = None , ) -> dict:
__magic_name__ : int = super()._pad(
encoded_inputs=_A , max_length=_A , padding_strategy=_A , pad_to_multiple_of=_A , return_attention_mask=_A , )
# Load from model defaults
if return_attention_mask is None:
__magic_name__ : Any = 'attention_mask' in self.model_input_names
if return_attention_mask and "global_attention_mask" in encoded_inputs:
__magic_name__ : Dict = encoded_inputs[self.model_input_names[0]]
# `global_attention_mask` need to have the same length as other (sequential) inputs.
__magic_name__ : Optional[int] = len(encoded_inputs['global_attention_mask'] ) != len(_A )
if needs_to_be_padded:
__magic_name__ : Optional[int] = len(_A ) - len(encoded_inputs['global_attention_mask'] )
if self.padding_side == "right":
# Use `-1` since `0` in `global_attention_mask` means `local attention` instead of `not to attend`
__magic_name__ : Optional[Any] = (
encoded_inputs['global_attention_mask'] + [-1] * difference
)
elif self.padding_side == "left":
__magic_name__ : Any = [-1] * difference + encoded_inputs[
'global_attention_mask'
]
else:
raise ValueError('Invalid padding strategy:' + str(self.padding_side ) )
return encoded_inputs | 331 |
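A standalone sketch of the padding rule implemented in `_pad` above, with made-up lengths; `-1` is used as the pad value because `0` already means local attention rather than "do not attend":

global_attention_mask = [1, 0, 0]  # global attention on the first token only
padded_length = 6
difference = padded_length - len(global_attention_mask)
padded_mask = global_attention_mask + [-1] * difference  # right padding
assert padded_mask == [1, 0, 0, -1, -1, -1]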
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 331 | 1 |
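The `_LazyModule` wiring above defers the heavy framework imports until an attribute is first accessed. A stripped-down sketch of the same idea (illustrative only, not the actual `_LazyModule` implementation) using the module-level `__getattr__` hook from PEP 562:

import importlib

_import_structure = {'configuration_xglm': ['XGLMConfig']}

def __getattr__(name):
    # look up which submodule provides the requested attribute, import it lazily
    for module_name, attrs in _import_structure.items():
        if name in attrs:
            module = importlib.import_module(f'.{module_name}', __name__)
            return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')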
'''simple docstring'''
from math import ceil
from typing import List, Optional, Union
import numpy as np
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import BatchFeature, SequenceFeatureExtractor
from ...utils import TensorType, logging
lowerCAmelCase :str = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = ["""audio_values""", """audio_mask"""]
def __init__( self : str , _A : Any=2048 , _A : List[str]=1 , _A : Optional[Any]=[16, 16] , _A : Tuple=128 , _A : Any=44100 , _A : List[Any]=86 , _A : Union[str, Any]=2048 , _A : int=0.0 , **_A : str , ) -> Dict:
super().__init__(
feature_size=_A , sampling_rate=_A , padding_value=_A , **_A , )
__magic_name__ : List[Any] = spectrogram_length
__magic_name__ : Optional[int] = num_channels
__magic_name__ : List[Any] = patch_size
__magic_name__ : int = feature_size // self.patch_size[1]
__magic_name__ : int = n_fft
__magic_name__ : List[Any] = sampling_rate // hop_length_to_sampling_rate
__magic_name__ : Optional[int] = sampling_rate
__magic_name__ : List[Any] = padding_value
__magic_name__ : List[Any] = mel_filter_bank(
num_frequency_bins=1 + n_fft // 2 , num_mel_filters=_A , min_frequency=0.0 , max_frequency=2_2050.0 , sampling_rate=_A , norm='slaney' , mel_scale='slaney' , ).T
def __lowerCAmelCase ( self : Union[str, Any] , _A : np.array ) -> np.ndarray:
__magic_name__ : Optional[int] = spectrogram(
_A , window_function(self.n_fft , 'hann' ) , frame_length=self.n_fft , hop_length=self.hop_length , power=2.0 , mel_filters=self.mel_filters.T , log_mel='dB' , db_range=80.0 , )
__magic_name__ : Optional[Any] = log_spec[:, :-1]
__magic_name__ : List[str] = log_spec - 20.0
__magic_name__ : Union[str, Any] = np.clip(log_spec / 40.0 , -2.0 , 0.0 ) + 1.0
return log_spec
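    # Added commentary, not original code: the spectrogram above is in dB, so
    # subtracting 20, dividing by 40 and clipping to [-2, 0] before adding 1
    # squeezes the retained dynamic range into [-1, 1].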
def __call__( self : List[Any] , _A : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] , _A : Optional[Union[str, TensorType]] = None , _A : Optional[bool] = True , _A : Optional[int] = None , _A : bool = False , _A : bool = False , **_A : Union[str, Any] , ) -> BatchFeature:
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
'This feature extractor is set to support sampling rate'
F' of {self.sampling_rate}. Please make sure that the provided `raw_speech` input was sampled'
F' with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
'It is strongly recommended to pass the `sampling_rate` argument to this function. '
'Failing to do so can result in silent errors that might be hard to debug.' )
__magic_name__ : Tuple = isinstance(_A , np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(F'Only mono-channel audio is supported for input to {self}' )
__magic_name__ : Dict = is_batched_numpy or (
isinstance(_A , (list, tuple) ) and (isinstance(raw_speech[0] , (np.ndarray, tuple, list) ))
)
if is_batched:
__magic_name__ : Tuple = [np.asarray([speech] , dtype=np.floataa ).T for speech in raw_speech]
elif not is_batched and not isinstance(_A , np.ndarray ):
__magic_name__ : str = np.asarray(_A , dtype=np.floataa )
elif isinstance(_A , np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
__magic_name__ : Optional[Any] = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
__magic_name__ : Optional[Any] = [np.asarray([raw_speech] ).T]
# Convert audio signals to log mel spectrograms, truncate by time axis
__magic_name__ : str = [
self._np_extract_fbank_features(waveform.squeeze() ).T[: self.spectrogram_length] for waveform in raw_speech
]
if isinstance(audio_features[0] , _A ):
__magic_name__ : List[str] = [np.asarray(_A , dtype=np.floataa ) for feature in audio_features]
# Create audio attention mask
__magic_name__ : Tuple = max(
[ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len for feature in audio_features] ) # The maximum number of audio patches in a batch
if return_attention_mask:
__magic_name__ : Union[str, Any] = [
(ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [1]
+ (max_patch_len - ceil(feature.shape[0] / self.patch_size[0] ) * self.freq_len) * [0]
for feature in audio_features
]
__magic_name__ : Any = np.array(_A ).astype(np.floataa )
# convert into correct format for padding
__magic_name__ : Optional[int] = max_patch_len // self.freq_len * self.patch_size[0] # The maximum audio size in a batch
__magic_name__ : Optional[Any] = np.ones([len(_A ), 1, max_time_len, self.feature_size] ).astype(np.floataa )
__magic_name__ : List[str] = padded_audio_features * self.padding_value
for i in range(len(_A ) ):
__magic_name__ : Tuple = audio_features[i]
__magic_name__ : List[str] = feature
# return as BatchFeature
if return_attention_mask:
__magic_name__ : Optional[int] = {'audio_values': padded_audio_features, 'audio_mask': audio_mask}
else:
__magic_name__ : Union[str, Any] = {'audio_values': padded_audio_features}
__magic_name__ : List[Any] = BatchFeature(data=_A , tensor_type=_A )
return encoded_inputs | 331 |
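A back-of-the-envelope check of the patch arithmetic above, with illustrative numbers rather than values from a real checkpoint: with patch_size=[16, 16] and feature_size=128 the frequency axis holds 128 // 16 = 8 patches, so a spectrogram with 100 time frames contributes ceil(100 / 16) * 8 = 56 attended patches.

from math import ceil

patch_size = [16, 16]
feature_size = 128
freq_len = feature_size // patch_size[1]
num_time_frames = 100
attended_patches = ceil(num_time_frames / patch_size[0]) * freq_len
assert (freq_len, attended_patches) == (8, 56)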
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = ["""pixel_values"""]
def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None:
super().__init__(**_A )
__magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384}
__magic_name__ : Dict = get_size_dict(_A , default_to_square=_A )
__magic_name__ : List[Any] = do_resize
__magic_name__ : str = size
# Default value set here for backwards compatibility where the value in config is None
__magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256
__magic_name__ : int = resample
__magic_name__ : List[str] = do_rescale
__magic_name__ : List[Any] = rescale_factor
__magic_name__ : str = do_normalize
__magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
__magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
__magic_name__ : Dict = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__magic_name__ : Dict = int(shortest_edge / crop_pct )
__magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
__magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int:
return rescale(_A , scale=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image:
__magic_name__ : int = do_resize if do_resize is not None else self.do_resize
__magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
__magic_name__ : Optional[Any] = resample if resample is not None else self.resample
__magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ : str = image_mean if image_mean is not None else self.image_mean
__magic_name__ : Dict = image_std if image_std is not None else self.image_std
__magic_name__ : Dict = size if size is not None else self.size
__magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A )
__magic_name__ : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
__magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images]
if do_rescale:
__magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
__magic_name__ : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A ) | 331 | 1 |
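A standalone arithmetic sketch (illustrative values) of the crop_pct branch above: when the requested shortest edge is below 384, the image is first resized to shortest_edge / crop_pct and only then center-cropped back down, mimicking the usual 224-out-of-256 evaluation crop.

crop_pct = 224 / 256
shortest_edge = 224
resize_shortest_edge = int(shortest_edge / crop_pct)
assert resize_shortest_edge == 256  # resize to 256, then center-crop to 224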
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_torch_available,
)
lowerCAmelCase :List[str] = {
'''configuration_gpt_bigcode''': ['''GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''GPTBigCodeConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :List[Any] = [
'''GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''GPTBigCodeForSequenceClassification''',
'''GPTBigCodeForTokenClassification''',
'''GPTBigCodeForCausalLM''',
'''GPTBigCodeModel''',
'''GPTBigCodePreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_gpt_bigcode import GPT_BIGCODE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTBigCodeConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_bigcode import (
GPT_BIGCODE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTBigCodeForCausalLM,
GPTBigCodeForSequenceClassification,
GPTBigCodeForTokenClassification,
GPTBigCodeModel,
GPTBigCodePreTrainedModel,
)
else:
import sys
lowerCAmelCase :Tuple = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.054571817E-34 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8 # unit of c : m * s^-1
def lowerCamelCase ( force : float , area : float , distance : float ):
    """simple docstring"""
    if (force, area, distance).count(0 ) != 1:
        raise ValueError('One and only one argument must be 0' )
    if force < 0:
        raise ValueError('Magnitude of force can not be negative' )
    if distance < 0:
        raise ValueError('Distance can not be negative' )
    if area < 0:
        raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 | 1 |
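A worked example for the first branch above (standalone; the plate area and separation are illustrative): 1 m^2 plates one micrometre apart feel a force of roughly 1.3 mN.

from math import pi

h_bar = 1.054571817e-34  # J * s
c = 3e8                  # m / s
area = 1.0               # m^2
distance = 1e-6          # m
force = (h_bar * c * pi**2 * area) / (240 * distance**4)
print(f'{force:.2e} N')  # about 1.30e-03 N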
'''simple docstring'''
import argparse
import pickle
import numpy as np
import torch
from torch import nn
from transformers import ReformerConfig, ReformerModelWithLMHead
from transformers.utils import logging
logging.set_verbosity_info()
def set_param ( torch_layer , weight , bias=None ):
    """simple docstring"""
    assert torch_layer.weight.shape == weight.shape, f'{torch_layer} layer.weight does not match'
    torch_layer.weight = nn.Parameter(weight )
    if bias is not None:
        assert torch_layer.bias.shape == bias.shape, f'{torch_layer} layer.bias does not match'
        torch_layer.bias = nn.Parameter(bias )
def set_layer_weights_in_torch_lsh ( weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query_key = np.asarray(weights[0] )
    np_value = np.asarray(weights[1] )
    np_dense = np.asarray(weights[2] )
    set_param(
        torch_layer.self_attention.query_key , torch.tensor(np_query_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_layer_weights_in_torch_local ( weights , torch_layer , hidden_size ):
    """simple docstring"""
    np_query = np.asarray(weights[0] )
    np_key = np.asarray(weights[1] )
    np_value = np.asarray(weights[2] )
    np_dense = np.asarray(weights[3] )
    set_param(
        torch_layer.self_attention.query , torch.tensor(np_query ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.key , torch.tensor(np_key ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.self_attention.value , torch.tensor(np_value ).transpose(1 , 2 ).contiguous().view(-1 , hidden_size ) , )
    set_param(
        torch_layer.output.dense , torch.tensor(np_dense ).view(-1 , hidden_size ).contiguous().transpose(0 , 1 ) , )
def set_block_weights_in_torch ( weights , torch_block , hidden_size ):
    """simple docstring"""
    # layernorm 1
    layer_norm_1 = weights[0][0][0]
    layer_norm_1_weight = np.asarray(layer_norm_1[0] )
    layer_norm_1_bias = np.asarray(layer_norm_1[1] )
    set_param(
        torch_block.attention.layer_norm , torch.tensor(layer_norm_1_weight ) , torch.tensor(layer_norm_1_bias ) , )
    # lsh weights + output
    attn_weights = weights[0][1]
    if len(attn_weights ) < 4:
        set_layer_weights_in_torch_lsh(attn_weights , torch_block.attention , hidden_size )
    else:
        set_layer_weights_in_torch_local(attn_weights , torch_block.attention , hidden_size )
    # intermediate weights
    intermediate_weights = weights[2][0][1][2]
    # Chunked Feed Forward
    if len(intermediate_weights ) == 4:
        intermediate_weights = intermediate_weights[2]
    # layernorm 2
    layer_norm_2_weight = np.asarray(intermediate_weights[0][0] )
    layer_norm_2_bias = np.asarray(intermediate_weights[0][1] )
    set_param(
        torch_block.feed_forward.layer_norm , torch.tensor(layer_norm_2_weight ) , torch.tensor(layer_norm_2_bias ) , )
    # intermediate dense
    inter_dense_weight = np.asarray(intermediate_weights[1][0] )
    inter_dense_bias = np.asarray(intermediate_weights[1][1] )
    set_param(
        torch_block.feed_forward.dense.dense , torch.tensor(inter_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(inter_dense_bias ) , )
    # intermediate out
    out_dense_weight = np.asarray(intermediate_weights[4][0] )
    out_dense_bias = np.asarray(intermediate_weights[4][1] )
    set_param(
        torch_block.feed_forward.output.dense , torch.tensor(out_dense_weight ).transpose(0 , 1 ).contiguous() , torch.tensor(out_dense_bias ) , )
def set_model_weights_in_torch ( weights , torch_model , hidden_size ):
    """simple docstring"""
    torch_model_reformer = torch_model.reformer
    # word embeds
    word_embeddings = np.asarray(weights[1] )
    set_param(
        torch_model_reformer.embeddings.word_embeddings , torch.tensor(word_embeddings ) , )
    if isinstance(weights[3] , tuple ):
        position_embeddings = torch_model_reformer.embeddings.position_embeddings
        for emb_idx in range(len(position_embeddings.weights ) ):
            emb_weights = np.asarray(weights[3][emb_idx][0] )
            assert (
                position_embeddings.weights[emb_idx].shape == emb_weights.shape
            ), f'{position_embeddings[emb_idx]} emb does not match'
            position_embeddings.weights[emb_idx] = nn.Parameter(torch.tensor(emb_weights ) )
    trax_layer_weights = weights[5]
    assert len(torch_model_reformer.encoder.layers ) * 4 == len(
        trax_layer_weights ), "HF and trax model do not have the same number of layers"
    for layer_idx, layer in enumerate(torch_model_reformer.encoder.layers ):
        block_weights = trax_layer_weights[4 * layer_idx : 4 * (layer_idx + 1)]
        set_block_weights_in_torch(block_weights , layer , hidden_size )
    # output layer norm
    layer_norm_out_weight = np.asarray(weights[7][0] )
    layer_norm_out_bias = np.asarray(weights[7][1] )
    set_param(
        torch_model_reformer.encoder.layer_norm , torch.tensor(layer_norm_out_weight ) , torch.tensor(layer_norm_out_bias ) , )
    # output embeddings
    output_embed_weights = np.asarray(weights[9][0] )
    output_embed_bias = np.asarray(weights[9][1] )
    set_param(
        torch_model.lm_head.decoder , torch.tensor(output_embed_weights ).transpose(0 , 1 ).contiguous() , torch.tensor(output_embed_bias ) , )
def convert_trax_checkpoint_to_pytorch ( trax_model_pkl_path , config_file , pytorch_dump_path ):
    """simple docstring"""
    config = ReformerConfig.from_json_file(config_file )
    print(f'Building PyTorch model from configuration: {config}' )
    model = ReformerModelWithLMHead(config )
    with open(trax_model_pkl_path , 'rb' ) as f:
        model_weights = pickle.load(f )['weights']
    set_model_weights_in_torch(model_weights , model , config.hidden_size )
    # Save pytorch-model
    print(f'Save PyTorch model to {pytorch_dump_path}' )
    torch.save(model.state_dict() , pytorch_dump_path )
if __name__ == "__main__":
lowerCAmelCase :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--trax_model_pkl_path''', default=None, type=str, required=True, help='''Path to the TensorFlow checkpoint path.'''
)
parser.add_argument(
'''--config_file''',
default=None,
type=str,
required=True,
help=(
'''The config json file corresponding to the pre-trained Reformer model. \n'''
'''This specifies the model architecture.'''
),
)
parser.add_argument(
'''--pytorch_dump_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase :str = parser.parse_args()
convert_trax_checkpoint_to_pytorch(args.trax_model_pkl_path, args.config_file, args.pytorch_dump_path) | 331 |
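The repeated `.transpose(0, 1)` calls above exist because trax stores dense kernels as (input_dim, output_dim) while `torch.nn.Linear.weight` is (out_features, in_features). A minimal standalone illustration with made-up shapes:

import torch

trax_kernel = torch.randn(4, 8)   # trax layout: (input_dim, output_dim)
linear = torch.nn.Linear(4, 8)    # torch layout: weight is (8, 4)
linear.weight.data = trax_kernel.transpose(0, 1).contiguous()
assert linear.weight.shape == (8, 4)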
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def generate_random_hand ( ):
    """simple docstring"""
    play , oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand , other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands ( number_of_hands : int = 100 ):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush ( hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight ( hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight ( hand , expected , card_values ):
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind ( hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values ( hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_hand_compare_with ( hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_hand_compare_random ( hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted ( ):
    """simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight ( ):
    """simple docstring"""
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight ( ):
    """simple docstring"""
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project ( ):
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player , opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
if output == "Win":
answer += 1
assert answer == 376 | 331 | 1 |
'''simple docstring'''
from typing import List
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase :Union[str, Any] = logging.get_logger(__name__)
lowerCAmelCase :Optional[int] = {
'''snap-research/efficientformer-l1-300''': (
'''https://huggingface.co/snap-research/efficientformer-l1-300/resolve/main/config.json'''
),
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = """efficientformer"""
def __init__( self : str , _A : List[int] = [3, 2, 6, 4] , _A : List[int] = [48, 96, 224, 448] , _A : List[bool] = [True, True, True, True] , _A : int = 448 , _A : int = 32 , _A : int = 4 , _A : int = 7 , _A : int = 5 , _A : int = 8 , _A : int = 4 , _A : float = 0.0 , _A : int = 16 , _A : int = 3 , _A : int = 3 , _A : int = 3 , _A : int = 2 , _A : int = 1 , _A : float = 0.0 , _A : int = 1 , _A : bool = True , _A : bool = True , _A : float = 1E-5 , _A : str = "gelu" , _A : float = 0.02 , _A : float = 1E-12 , _A : int = 224 , _A : float = 1E-05 , **_A : Any , ) -> None:
super().__init__(**_A )
__magic_name__ : str = hidden_act
__magic_name__ : Tuple = hidden_dropout_prob
__magic_name__ : Any = hidden_sizes
__magic_name__ : List[str] = num_hidden_layers
__magic_name__ : str = num_attention_heads
__magic_name__ : List[str] = initializer_range
__magic_name__ : List[str] = layer_norm_eps
__magic_name__ : Any = patch_size
__magic_name__ : Optional[Any] = num_channels
__magic_name__ : Optional[int] = depths
__magic_name__ : int = mlp_expansion_ratio
__magic_name__ : Any = downsamples
__magic_name__ : Union[str, Any] = dim
__magic_name__ : Optional[Any] = key_dim
__magic_name__ : List[str] = attention_ratio
__magic_name__ : int = resolution
__magic_name__ : Optional[int] = pool_size
__magic_name__ : Optional[int] = downsample_patch_size
__magic_name__ : Tuple = downsample_stride
__magic_name__ : Dict = downsample_pad
__magic_name__ : Optional[int] = drop_path_rate
__magic_name__ : str = num_metaad_blocks
__magic_name__ : List[Any] = distillation
__magic_name__ : Tuple = use_layer_scale
__magic_name__ : Union[str, Any] = layer_scale_init_value
__magic_name__ : Union[str, Any] = image_size
__magic_name__ : Dict = batch_norm_eps | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase :Union[str, Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel''']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
'''simple docstring'''
from __future__ import annotations
def encode ( plain : str ):
    """simple docstring"""
    return [ord(elem ) - 96 for elem in plain]
def decode ( encoded : list[int] ):
    """simple docstring"""
    return "".join(chr(elem + 96 ) for elem in encoded )
def main ( ):
    """simple docstring"""
    encoded = encode(input('-> ' ).strip().lower() )
    print('Encoded: ' , encoded )
    print('Decoded:' , decode(encoded ) )
if __name__ == "__main__":
main() | 331 |
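A quick round-trip check of the cipher defined above (standalone; lowercase letters only):

assert encode('abc') == [1, 2, 3]          # ord('a') - 96 == 1
assert decode(encode('hello')) == 'hello'  # the mapping is its own inverse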
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple:
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , _A : Union[str, List[str], "Image", List["Image"]] , **_A : Dict ) -> Dict:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]:
__magic_name__ : str = {}
if "candidate_labels" in kwargs:
__magic_name__ : str = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__magic_name__ : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
def __lowerCAmelCase ( self : str , _A : Dict , _A : Optional[Any]=None , _A : int="This is a photo of {}." ) -> int:
__magic_name__ : Dict = load_image(_A )
__magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
__magic_name__ : Optional[Any] = candidate_labels
__magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels]
__magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
__magic_name__ : Optional[Any] = [text_inputs]
return inputs
def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str:
__magic_name__ : str = model_inputs.pop('candidate_labels' )
__magic_name__ : str = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , _A ):
__magic_name__ : Dict = text_inputs[0]
else:
# Batching case.
__magic_name__ : Optional[Any] = text_inputs[0][0]
__magic_name__ : List[Any] = self.model(**_A , **_A )
__magic_name__ : str = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]:
__magic_name__ : Tuple = model_outputs.pop('candidate_labels' )
__magic_name__ : Union[str, Any] = model_outputs['logits'][0]
if self.framework == "pt":
__magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
__magic_name__ : Tuple = probs.tolist()
if not isinstance(_A , _A ):
__magic_name__ : Any = [scores]
elif self.framework == "tf":
__magic_name__ : Any = stable_softmax(_A , axis=-1 )
__magic_name__ : Dict = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__magic_name__ : Union[str, Any] = [
{'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(_A , _A ) , key=lambda x : -x[0] )
]
return result | 331 | 1 |
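A hypothetical usage sketch of the pipeline implemented above; the checkpoint, image path, and labels are illustrative, and any CLIP-style zero-shot checkpoint should behave the same way:

from transformers import pipeline

classifier = pipeline('zero-shot-image-classification', model='openai/clip-vit-base-patch32')
predictions = classifier(
    'path/to/some_image.png',  # hypothetical local image
    candidate_labels=['animals', 'furniture'],
)
# -> [{'score': ..., 'label': 'animals'}, {'score': ..., 'label': 'furniture'}]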
'''simple docstring'''
def perfect ( number : int ):
    """simple docstring"""
    return sum(i for i in range(1 , number // 2 + 1 ) if number % i == 0 ) == number
if __name__ == "__main__":
print('''Program to check whether a number is a Perfect number or not...''')
lowerCAmelCase :int = int(input('''Enter number: ''').strip())
print(F'{number} is {"" if perfect(number) else "not "}a Perfect Number.') | 331 |
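The first few perfect numbers double as a quick standalone check for the function above:

for n in (6, 28, 496, 8128):
    assert perfect(n)      # each equals the sum of its proper divisors
assert not perfect(27)     # 1 + 3 + 9 != 27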
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase :int = '''pt'''
elif is_tf_available():
lowerCAmelCase :Optional[Any] = '''tf'''
else:
lowerCAmelCase :Optional[Any] = '''jax'''
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = ByTaTokenizer
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
super().setUp()
__magic_name__ : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def __lowerCAmelCase ( self : Optional[int] , _A : Union[str, Any] , _A : int=False , _A : Union[str, Any]=20 , _A : Optional[int]=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__magic_name__ : Optional[Any] = []
for i in range(len(_A ) ):
try:
__magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__magic_name__ : Any = list(filter(lambda _A : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) )
__magic_name__ : List[str] = list(filter(lambda _A : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
__magic_name__ : Optional[int] = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
__magic_name__ : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__magic_name__ : List[str] = [t[0] for t in toks]
# Ensure consistency
__magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
__magic_name__ : int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
__magic_name__ : Union[str, Any] = ' ' + output_txt
__magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def __lowerCAmelCase ( self : int ) -> str:
__magic_name__ : Any = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
__magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __lowerCAmelCase ( self : int ) -> Tuple:
__magic_name__ : Optional[int] = self.ta_base_tokenizer
__magic_name__ : Optional[int] = 'Unicode €.'
__magic_name__ : Optional[Any] = tokenizer(_A )
__magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : Any = tokenizer.decode(_A )
self.assertEqual(_A , 'Unicode €.</s>' )
__magic_name__ : Any = tokenizer('e è é ê ë' )
__magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : List[str] = tokenizer.decode(_A )
self.assertEqual(_A , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
def __lowerCAmelCase ( self : Any ) -> int:
__magic_name__ : List[Any] = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
__magic_name__ : str = list(batch.input_ids.numpy()[0] )
else:
__magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _A )
self.assertIn('attention_mask' , _A )
self.assertNotIn('decoder_input_ids' , _A )
self.assertNotIn('decoder_attention_mask' , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Union[str, Any] = self.ta_base_tokenizer
__magic_name__ : Tuple = [
'Summary of the text.',
'Another summary.',
]
__magic_name__ : Dict = tokenizer(
text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : Any = ['A long paragraph for summarization. </s>']
__magic_name__ : List[str] = ['Summary of the text. </s>']
# fmt: off
__magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__magic_name__ : str = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['input_ids'][0] )
self.assertEqual(_A , batch['labels'][0] )
def __lowerCAmelCase ( self : Any ) -> str:
# safety check on max_len default value so we are sure the test works
__magic_name__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str = tempfile.mkdtemp()
__magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running'
__magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
__magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Optional[Any] = tempfile.mkdtemp()
__magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : Any = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
__magic_name__ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__magic_name__ : Union[str, Any] = json.load(_A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__magic_name__ : Optional[Any] = json.load(_A )
__magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )]
__magic_name__ : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
__magic_name__ : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : str = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )]
__magic_name__ : Optional[Any] = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
__magic_name__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
__magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : List[str] ) -> int:
pass
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> int:
pass
def __lowerCAmelCase ( self : str ) -> Tuple:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
__magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
__magic_name__ : int = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : List[str] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__magic_name__ : List[str] = 0
__magic_name__ : str = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] )
setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 331 | 1 |
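# --- Hedged standalone sketches of two ByT5 facts the tests above rely on.
# (1) Each UTF-8 byte b maps to token id b + 3 (ids 0-2 are reserved for the
#     pad/eos/unk tokens, with </s> = 1), so 'U' (0x55 = 85) encodes to 88:
assert [b + 3 for b in 'Unicode €.'.encode('utf-8')] == [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49]
# (2) A single byte need not be valid UTF-8 on its own, which is why
#     `get_clean_sequence` above catches UnicodeDecodeError when decoding one
#     id at a time: 0x80 is a continuation byte with no lead byte.
try:
    bytes([128]).decode('utf-8')
except UnicodeDecodeError:
    pass  # expected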
'''simple docstring'''
from __future__ import annotations
import collections
import tempfile
import unittest
import numpy as np
from transformers.testing_utils import require_tf, require_vision, slow
from transformers.utils import is_tf_available, is_vision_available
from ...test_modeling_tf_common import floats_tensor, ids_tensor, random_attention_mask
from ..bert.test_modeling_tf_bert import TFBertModelTester
from ..clip.test_modeling_tf_clip import TFCLIPVisionModelTester
from ..deit.test_modeling_tf_deit import TFDeiTModelTester
from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
from ..vit.test_modeling_tf_vit import TFViTModelTester
if is_tf_available():
from transformers import (
TFBertModel,
TFCLIPVisionModel,
TFDeiTModel,
TFRobertaModel,
TFVisionTextDualEncoderModel,
TFViTModel,
VisionTextDualEncoderConfig,
)
if is_vision_available():
from PIL import Image
from transformers import VisionTextDualEncoderProcessor
def lowerCamelCase ( lowerCAmelCase : List[str] ):
"""simple docstring"""
if isinstance(lowerCAmelCase , collections.abc.Iterable ):
        return lowerCAmelCase
    return (lowerCAmelCase, lowerCAmelCase)
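# Hedged usage sketch for the helper above: iterables pass through unchanged,
# scalars are duplicated into a pair (handy for image/patch sizes).
assert lowerCamelCase((224, 224)) == (224, 224)
assert lowerCamelCase(16) == (16, 16)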
@require_tf
class _lowerCamelCase :
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Any ) -> Any:
pass
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
pass
def __lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any] , _A : List[str] , _A : List[Any] , _A : List[Any] , _A : Tuple=None , **_A : str ) -> List[Any]:
__magic_name__ : str = VisionTextDualEncoderConfig.from_vision_text_configs(_A , _A )
__magic_name__ : Union[str, Any] = TFVisionTextDualEncoderModel(_A )
__magic_name__ : Union[str, Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], config.projection_dim) )
def __lowerCAmelCase ( self : Any , _A : Tuple , _A : Tuple , _A : str , _A : Optional[int] , _A : Any=None , **_A : int ) -> Any:
__magic_name__ , __magic_name__ : Tuple = self.get_vision_text_model(_A , _A )
__magic_name__ : List[Any] = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__magic_name__ : Dict = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowerCAmelCase ( self : Dict , _A : Any , _A : Dict , _A : List[Any] , _A : List[str] , _A : Optional[Any]=None , **_A : List[Any] ) -> Any:
__magic_name__ , __magic_name__ : List[str] = self.get_vision_text_model(_A , _A )
__magic_name__ : Optional[int] = {'vision_model': vision_model, 'text_model': text_model}
__magic_name__ : Optional[int] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(**_A )
__magic_name__ : Optional[int] = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
self.assertEqual(output['text_embeds'].shape , (input_ids.shape[0], model.config.projection_dim) )
self.assertEqual(output['image_embeds'].shape , (pixel_values.shape[0], model.config.projection_dim) )
def __lowerCAmelCase ( self : int , _A : Union[str, Any] , _A : Optional[int] , _A : int , _A : Dict , _A : Union[str, Any]=None , **_A : int ) -> Optional[Any]:
__magic_name__ , __magic_name__ : Any = self.get_vision_text_model(_A , _A )
__magic_name__ : int = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__magic_name__ : Union[str, Any] = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__magic_name__ : str = output[0].numpy()
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A )
__magic_name__ : List[Any] = TFVisionTextDualEncoderModel.from_pretrained(_A )
__magic_name__ : Tuple = model(input_ids=_A , pixel_values=_A , attention_mask=_A )
__magic_name__ : int = after_output[0].numpy()
__magic_name__ : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Union[str, Any]=None , **_A : str ) -> Dict:
__magic_name__ , __magic_name__ : Optional[int] = self.get_vision_text_model(_A , _A )
__magic_name__ : Any = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__magic_name__ : Any = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__magic_name__ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in ViT, the seq_len equals the number of patches + 1 (we add 1 for the [CLS] token)
__magic_name__ : Tuple = to_atuple(vision_model.config.image_size )
__magic_name__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
__magic_name__ : str = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ : List[str] = num_patches + 1
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ : Any = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowerCAmelCase ( self : Dict , _A : np.ndarray , _A : np.ndarray , _A : float ) -> Union[str, Any]:
__magic_name__ : Union[str, Any] = np.abs((a - b) ).max()
self.assertLessEqual(_A , _A , F'Difference between torch and flax is {diff} (>= {tol}).' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
__magic_name__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_model(**_A )
def __lowerCAmelCase ( self : Any ) -> Any:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
self.check_model_from_pretrained_configs(**_A )
def __lowerCAmelCase ( self : List[str] ) -> int:
__magic_name__ : Optional[Any] = self.prepare_config_and_inputs()
self.check_vision_text_dual_encoder_from_pretrained(**_A )
def __lowerCAmelCase ( self : str ) -> Tuple:
__magic_name__ : str = self.prepare_config_and_inputs()
self.check_save_load(**_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
__magic_name__ : Optional[int] = self.prepare_config_and_inputs()
self.check_vision_text_output_attention(**_A )
@slow
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ , __magic_name__ : Tuple = self.get_pretrained_model_and_inputs()
__magic_name__ : Tuple = model_a(**_A )
__magic_name__ : str = outputs[0].numpy()
with tempfile.TemporaryDirectory() as tmp_dirname:
model_a.save_pretrained(_A )
__magic_name__ : Optional[Any] = TFVisionTextDualEncoderModel.from_pretrained(_A )
__magic_name__ : List[str] = model_a(**_A )
__magic_name__ : Any = after_outputs[0].numpy()
__magic_name__ : Tuple = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_A , 1E-5 )
@require_tf
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Dict ) -> str:
__magic_name__ : Union[str, Any] = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'hf-internal-testing/tiny-random-vit' , 'hf-internal-testing/tiny-random-bert' )
__magic_name__ : List[str] = 13
__magic_name__ : Optional[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ : Union[str, Any] = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ : Tuple = random_attention_mask([batch_size, 4] )
__magic_name__ : str = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __lowerCAmelCase ( self : str , _A : int , _A : int ) -> str:
__magic_name__ : Any = TFViTModel(_A , name='vision_model' )
__magic_name__ : int = TFBertModel(_A , name='text_model' )
return vision_model, text_model
def __lowerCAmelCase ( self : int ) -> int:
__magic_name__ : Tuple = TFViTModelTester(self )
__magic_name__ : str = TFBertModelTester(self )
__magic_name__ : int = vit_model_tester.prepare_config_and_inputs()
__magic_name__ : List[Any] = bert_model_tester.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ : str = vision_config_and_inputs
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[Any] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
# DeiT repo doesn't have TF weights, but we don't actually use the weights at all so let's
# just reinitialize it.
__magic_name__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-deit-tf' , 'hf-internal-testing/tiny-random-roberta' )
__magic_name__ : Any = 13
__magic_name__ : List[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ : Tuple = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ : Optional[Any] = random_attention_mask([batch_size, 4] )
__magic_name__ : Optional[int] = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __lowerCAmelCase ( self : str , _A : int , _A : str , _A : Optional[Any] , _A : Optional[Any] , _A : str=None , **_A : List[Any] ) -> Dict:
__magic_name__ , __magic_name__ : Any = self.get_vision_text_model(_A , _A )
__magic_name__ : Dict = TFVisionTextDualEncoderModel(vision_model=_A , text_model=_A )
__magic_name__ : List[str] = model(
input_ids=_A , pixel_values=_A , attention_mask=_A , output_attentions=_A )
__magic_name__ : List[str] = output.vision_model_output.attentions
self.assertEqual(len(_A ) , vision_config.num_hidden_layers )
# in DEiT, the seq_len equals the number of patches + 2 (we add 2 for the [CLS] and distillation tokens)
__magic_name__ : str = to_atuple(vision_model.config.image_size )
__magic_name__ : Optional[Any] = to_atuple(vision_model.config.patch_size )
__magic_name__ : List[str] = (image_size[1] // patch_size[1]) * (image_size[0] // patch_size[0])
__magic_name__ : Dict = num_patches + 2
self.assertEqual(vision_attentions[0].shape[-3:] , (vision_config.num_attention_heads, seq_len, seq_len) )
__magic_name__ : Optional[Any] = output.text_model_output.attentions
self.assertEqual(len(_A ) , text_config.num_hidden_layers )
self.assertEqual(
text_attentions[0].shape[-3:] , (text_config.num_attention_heads, input_ids.shape[-1], input_ids.shape[-1]) , )
def __lowerCAmelCase ( self : Tuple , _A : Union[str, Any] , _A : List[str] ) -> int:
__magic_name__ : Dict = TFDeiTModel(_A , name='vision_model' )
__magic_name__ : str = TFRobertaModel(_A , name='text_model' )
return vision_model, text_model
def __lowerCAmelCase ( self : Optional[Any] ) -> Tuple:
__magic_name__ : Optional[Any] = TFDeiTModelTester(self )
__magic_name__ : List[Any] = TFRobertaModelTester(self )
__magic_name__ : Any = vit_model_tester.prepare_config_and_inputs()
__magic_name__ : Union[str, Any] = bert_model_tester.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ : Tuple = vision_config_and_inputs
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : List[Any] = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_tf
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : int ) -> Union[str, Any]:
__magic_name__ : Any = TFVisionTextDualEncoderModel.from_vision_text_pretrained(
'Rocketknight1/tiny-random-clip-tf' , 'hf-internal-testing/tiny-random-bert' )
__magic_name__ : Tuple = 13
__magic_name__ : Optional[Any] = floats_tensor(
[
batch_size,
model.vision_model.config.num_channels,
model.vision_model.config.image_size,
model.vision_model.config.image_size,
] )
__magic_name__ : Dict = ids_tensor([batch_size, 4] , model.text_model.config.vocab_size )
__magic_name__ : Optional[int] = random_attention_mask([batch_size, 4] )
__magic_name__ : int = {'pixel_values': pixel_values, 'input_ids': input_ids, 'attention_mask': attention_mask}
return model, inputs
def __lowerCAmelCase ( self : List[str] , _A : List[str] , _A : Optional[int] ) -> int:
__magic_name__ : Any = TFCLIPVisionModel(_A , name='vision_model' )
__magic_name__ : Dict = TFBertModel(_A , name='text_model' )
return vision_model, text_model
def __lowerCAmelCase ( self : Any ) -> List[str]:
__magic_name__ : int = TFCLIPVisionModelTester(self )
__magic_name__ : Dict = TFBertModelTester(self )
__magic_name__ : Union[str, Any] = clip_model_tester.prepare_config_and_inputs()
__magic_name__ : Optional[Any] = bert_model_tester.prepare_config_and_inputs()
__magic_name__ , __magic_name__ : int = vision_config_and_inputs
        __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Any = text_config_and_inputs
return {
"text_config": text_config,
"vision_config": vision_config,
"pixel_values": pixel_values,
"attention_mask": input_mask,
"input_ids": input_ids,
"text_token_type_ids": token_type_ids,
"text_sequence_labels": sequence_labels,
"text_token_labels": token_labels,
"text_choice_labels": choice_labels,
}
@require_vision
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : List[Any] ) -> str:
__magic_name__ : Any = TFVisionTextDualEncoderModel.from_pretrained(
'clip-italian/clip-italian' , logit_scale_init_value=1.0 , from_pt=_A )
__magic_name__ : Optional[Any] = VisionTextDualEncoderProcessor.from_pretrained('clip-italian/clip-italian' )
__magic_name__ : Optional[int] = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
__magic_name__ : str = processor(
text=['una foto di un gatto', 'una foto di un cane'] , images=_A , padding=_A , return_tensors='np' )
__magic_name__ : int = model(**_A )
# verify the logits
self.assertEqual(outputs.logits_per_image.shape , (inputs.pixel_values.shape[0], inputs.input_ids.shape[0]) )
self.assertEqual(
outputs.logits_per_text.shape , (inputs.input_ids.shape[0], inputs.pixel_values.shape[0]) , )
__magic_name__ : Optional[int] = np.array([[1.228_4727, 0.310_4122]] )
self.assertTrue(np.allclose(outputs.logits_per_image.numpy() , _A , atol=1E-3 ) ) | 331 |
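# --- Hedged worked examples (standalone) for two conventions in the tests
# above; the numbers are illustrative, not taken from the testers.
import numpy as np
# ViT-style sequence length: (224 // 16) * (224 // 16) = 196 patches, plus 1
# for the [CLS] token (DeiT adds 2: [CLS] plus the distillation token).
_image_size, _patch_size = (224, 224), (16, 16)
_num_patches = (_image_size[1] // _patch_size[1]) * (_image_size[0] // _patch_size[0])
assert _num_patches + 1 == 197
# CLIP-style logit shapes: with 1 image and 2 candidate texts,
# logits_per_image is (n_images, n_texts) = (1, 2) and logits_per_text is its
# transpose, (2, 1): exactly the shapes the integration test asserts.
_logits_per_image = np.array([[1.228_4727, 0.310_4122]])
assert _logits_per_image.shape == (1, 2)
assert _logits_per_image.T.shape == (2, 1)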
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ : Dict = DisjunctiveConstraint(_A )
self.assertTrue(isinstance(dc.token_ids , _A ) )
with self.assertRaises(_A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_A ):
DisjunctiveConstraint(_A ) # fails here
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
__magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]]
__magic_name__ : List[Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 )
__magic_name__ : Optional[int] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 )
__magic_name__ : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 )
__magic_name__ : Any = stepped is True and completed is True and reset is False
self.assertTrue(_A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] ) | 331 | 1 |
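# --- Hedged standalone illustration of the "no nested constraints" rule
# tested above: [1, 2] is a strict prefix of [1, 2, 3, 4], so after emitting
# [1, 2] the constraint would be simultaneously fulfilled (short option) and
# still in progress (long option), the very ambiguity DisjunctiveConstraint
# refuses to allow.
_nested = [[1, 2], [1, 2, 3, 4]]
assert _nested[0] == _nested[1][: len(_nested[0])]  # strict prefix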
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase :List[str] = logging.get_logger(__name__)
lowerCAmelCase :List[Any] = {
'''edbeeching/decision-transformer-gym-hopper-medium''': (
'''https://huggingface.co/edbeeching/decision-transformer-gym-hopper-medium/resolve/main/config.json'''
),
# See all DecisionTransformer models at https://huggingface.co/models?filter=decision_transformer
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Any = """decision_transformer"""
A_ : Optional[Any] = ["""past_key_values"""]
A_ : List[str] = {
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : Any , _A : List[str]=17 , _A : Optional[int]=4 , _A : int=128 , _A : Union[str, Any]=4096 , _A : int=True , _A : int=1 , _A : int=1024 , _A : List[str]=3 , _A : List[str]=1 , _A : Optional[int]=None , _A : Dict="relu" , _A : Dict=0.1 , _A : str=0.1 , _A : List[Any]=0.1 , _A : str=1E-5 , _A : int=0.02 , _A : Union[str, Any]=True , _A : List[Any]=True , _A : int=50256 , _A : Optional[int]=50256 , _A : Optional[Any]=False , _A : List[str]=False , **_A : Optional[Any] , ) -> Tuple:
__magic_name__ : Union[str, Any] = state_dim
__magic_name__ : List[Any] = act_dim
__magic_name__ : Union[str, Any] = hidden_size
__magic_name__ : Dict = max_ep_len
__magic_name__ : str = action_tanh
__magic_name__ : Union[str, Any] = vocab_size
__magic_name__ : Union[str, Any] = n_positions
__magic_name__ : Optional[Any] = n_layer
__magic_name__ : Union[str, Any] = n_head
__magic_name__ : List[Any] = n_inner
__magic_name__ : Dict = activation_function
__magic_name__ : Optional[Any] = resid_pdrop
__magic_name__ : str = embd_pdrop
__magic_name__ : Any = attn_pdrop
__magic_name__ : Any = layer_norm_epsilon
__magic_name__ : List[Any] = initializer_range
__magic_name__ : Optional[Any] = scale_attn_weights
__magic_name__ : Any = use_cache
__magic_name__ : str = scale_attn_by_inverse_layer_idx
__magic_name__ : Dict = reorder_and_upcast_attn
__magic_name__ : Tuple = bos_token_id
__magic_name__ : Any = eos_token_id
super().__init__(bos_token_id=_A , eos_token_id=_A , **_A ) | 331 |
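# --- Hedged usage sketch for the config above, assuming the corresponding
# `transformers` class (DecisionTransformerConfig) is importable; its
# `attribute_map` aliases `num_attention_heads` to `n_head`, so both names
# should read the same value.
from transformers import DecisionTransformerConfig

_cfg = DecisionTransformerConfig(state_dim=17, act_dim=4, hidden_size=128, n_head=1)
assert _cfg.num_attention_heads == _cfg.n_head == 1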
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
lowerCAmelCase :List[str] = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
lowerCAmelCase :List[Any] = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Union[str, Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Tuple = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
lowerCAmelCase :Optional[Any] = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[int] = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
lowerCAmelCase :Tuple = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Union[str, Any] = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
lowerCAmelCase :Dict = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
lowerCAmelCase :Optional[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :int = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
lowerCAmelCase :List[str] = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
lowerCAmelCase :int = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :Optional[Any] = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Any = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
lowerCAmelCase :Dict = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
lowerCAmelCase :Tuple = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
lowerCAmelCase :Any = ''''''
lowerCAmelCase :Any = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
lowerCAmelCase :List[Any] = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
lowerCAmelCase :str = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
assert ReadMe.from_string(lowerCAmelCase , lowerCAmelCase ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Dict ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
__magic_name__ : str = ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : List[str] , lowerCAmelCase : Optional[int] ):
"""simple docstring"""
with pytest.raises(lowerCAmelCase , match=re.escape(expected_error.format(path='root' ) ) ):
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
ReadMe.from_string(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Optional[int] = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase ).to_dict()
assert out["name"] == path
assert out["text"] == ""
assert out["is_empty_text"]
assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Union[str, Any] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : str = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
__magic_name__ : int = ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Optional[int] = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
__magic_name__ : Any = expected_error.format(path=lowerCAmelCase )
with pytest.raises(lowerCAmelCase , match=re.escape(lowerCAmelCase ) ):
ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
with tempfile.TemporaryDirectory() as tmp_dir:
__magic_name__ : Any = Path(lowerCAmelCase ) / 'README.md'
with open(lowerCAmelCase , 'w+' ) as readme_file:
readme_file.write(lowerCAmelCase )
ReadMe.from_readme(lowerCAmelCase , lowerCAmelCase , suppress_parsing_errors=lowerCAmelCase ) | 331 | 1 |
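# --- Hedged sketch of the YAML front-matter convention that the error
# messages above distinguish: a pair of '---' lines delimits the metadata
# block, and the validator separates "no markers", "empty markers", and
# "only the start marker". A minimal detector under those assumptions
# (my own heuristic, not the datasets implementation):
def _yaml_marker_state(readme_text):
    lines = readme_text.splitlines()
    markers = [i for i, line in enumerate(lines) if line.strip() == '---']
    if not markers:
        return 'none'
    if len(markers) == 1:
        return 'start_only'
    return 'empty' if markers[1] == markers[0] + 1 else 'present'

assert _yaml_marker_state('---\nlanguage: en\n---\n# Title') == 'present'
assert _yaml_marker_state('---\n---\n# Title') == 'empty'
assert _yaml_marker_state('---\n# Title') == 'start_only'
assert _yaml_marker_state('# Title') == 'none'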
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = (DDPMScheduler,)
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
__magic_name__ : str = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _A , msg=F'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_A ) | 331 |
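# --- Hedged standalone sketch of the ancestral-sampling step that the
# commented-out lines in the full-loop test above allude to: for t > 0, DDPM
# adds scaled Gaussian noise to the predicted posterior mean. The shape and
# variance value below are illustrative only.
import torch

_pred_prev_sample = torch.zeros(1, 3, 8, 8)
_variance_t = 0.0_0979  # e.g. scheduler._get_variance(487) from the test above
_sample = _pred_prev_sample + (_variance_t ** 0.5) * torch.randn_like(_pred_prev_sample)
assert _sample.shape == _pred_prev_sample.shape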
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int:
__magic_name__ : str = parent
__magic_name__ : List[Any] = 13
__magic_name__ : Union[str, Any] = 7
__magic_name__ : Tuple = True
__magic_name__ : Dict = True
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = True
__magic_name__ : int = 99
__magic_name__ : List[str] = 384
__magic_name__ : Optional[int] = 2
__magic_name__ : List[Any] = 4
__magic_name__ : int = 37
__magic_name__ : Union[str, Any] = 'gelu'
__magic_name__ : Optional[int] = 0.1
__magic_name__ : str = 0.1
__magic_name__ : Optional[Any] = 512
__magic_name__ : Any = 16
__magic_name__ : Union[str, Any] = 2
__magic_name__ : Any = 0.02
__magic_name__ : List[str] = 3
__magic_name__ : Tuple = 4
__magic_name__ : List[Any] = 128
__magic_name__ : Optional[Any] = 2
__magic_name__ : List[str] = 9
__magic_name__ : str = 1
__magic_name__ : List[str] = None
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Optional[Any] = None
if self.use_input_mask:
__magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
if self.use_token_type_ids:
__magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
__magic_name__ : int = None
if self.use_labels:
__magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Optional[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A : Tuple , _A : int , _A : Union[str, Any] ) -> Any:
__magic_name__ : Dict = TFConvBertModel(config=_A )
__magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__magic_name__ : Any = [input_ids, input_mask]
__magic_name__ : Tuple = model(_A )
__magic_name__ : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]:
__magic_name__ : Dict = TFConvBertForMaskedLM(config=_A )
__magic_name__ : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple:
__magic_name__ : Any = self.num_labels
__magic_name__ : str = TFConvBertForSequenceClassification(config=_A )
__magic_name__ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = self.num_choices
__magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
__magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
__magic_name__ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int:
__magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A )
__magic_name__ : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[str] = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Tuple = False
A_ : Any = False
A_ : List[Any] = False
def __lowerCAmelCase ( self : List[Any] ) -> int:
__magic_name__ : Optional[Any] = TFConvBertModelTester(self )
__magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : int ) -> Any:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
__magic_name__ : Any = True
if hasattr(_A , 'use_cache' ):
__magic_name__ : List[Any] = True
__magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A )
for model_class in self.all_model_classes:
__magic_name__ : List[str] = self._prepare_for_class(_A , _A )
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Tuple = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
__magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' )
__magic_name__ : Optional[int] = tf.keras.models.load_model(_A )
__magic_name__ : Optional[Any] = model(_A )
if self.is_encoder_decoder:
__magic_name__ : Optional[int] = outputs['encoder_hidden_states']
__magic_name__ : Tuple = outputs['encoder_attentions']
else:
__magic_name__ : Union[str, Any] = outputs['hidden_states']
__magic_name__ : Optional[Any] = outputs['attentions']
self.assertEqual(len(_A ) , _A )
__magic_name__ : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_A )
def __lowerCAmelCase ( self : List[str] ) -> Any:
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : str = True
__magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A )
__magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A )
def check_decoder_attentions_output(_A : List[Any] ):
__magic_name__ : Tuple = len(_A )
self.assertEqual(out_len % 2 , 0 )
__magic_name__ : Any = outputs.decoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_A : int ):
__magic_name__ : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = False
__magic_name__ : List[str] = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
__magic_name__ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
__magic_name__ : Any = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[int] = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : str = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : str = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) )
self.assertEqual(model.config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : int ) -> int:
__magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : Tuple = model(_A )[0]
__magic_name__ : str = [1, 6, 768]
self.assertEqual(output.shape , _A )
__magic_name__ : Tuple = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 ) | 331 | 1 |
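# The integration check above boils down to this standalone regression-test pattern:
# run a fixed input through a pretrained checkpoint and compare a small, stable slice
# of the output against hard-coded reference values with a loose tolerance. A minimal
# sketch (checkpoint name, slice, reference values, and 1e-4 tolerance are all taken
# from the test above):
import tensorflow as tf
from transformers import TFConvBertModel

model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base')
output = model(tf.constant([[0, 1, 2, 3, 4, 5]]))[0]  # last_hidden_state, shape (1, 6, 768)
expected_slice = tf.constant(
    [[[-0.03475493, -0.4686034, -0.30638832],
      [0.22637248, -0.26988646, -0.7423424],
      [0.10324868, -0.45013508, -0.58280784]]] )
tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4)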
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available
lowerCAmelCase :List[Any] = {
'''configuration_megatron_bert''': ['''MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''MegatronBertConfig'''],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[int] = [
'''MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''MegatronBertForCausalLM''',
'''MegatronBertForMaskedLM''',
'''MegatronBertForMultipleChoice''',
'''MegatronBertForNextSentencePrediction''',
'''MegatronBertForPreTraining''',
'''MegatronBertForQuestionAnswering''',
'''MegatronBertForSequenceClassification''',
'''MegatronBertForTokenClassification''',
'''MegatronBertModel''',
'''MegatronBertPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_megatron_bert import MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP, MegatronBertConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_megatron_bert import (
MEGATRON_BERT_PRETRAINED_MODEL_ARCHIVE_LIST,
MegatronBertForCausalLM,
MegatronBertForMaskedLM,
MegatronBertForMultipleChoice,
MegatronBertForNextSentencePrediction,
MegatronBertForPreTraining,
MegatronBertForQuestionAnswering,
MegatronBertForSequenceClassification,
MegatronBertForTokenClassification,
MegatronBertModel,
MegatronBertPreTrainedModel,
)
else:
import sys
lowerCAmelCase :List[str] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 |
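# The `_LazyModule` indirection above keeps `import transformers` cheap: heavy
# submodules are imported only when one of their symbols is first accessed. A minimal
# sketch of the same mechanism (names here are illustrative, not the actual
# transformers implementation):
import importlib
import types

class LazyModuleSketch(types.ModuleType):
    def __init__(self, name, import_structure):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr):
        for submodule, symbols in self._import_structure.items():
            if attr in symbols:
                module = importlib.import_module(f'.{submodule}', self.__name__)
                return getattr(module, attr)  # the real import happens only here
        raise AttributeError(f'module {self.__name__!r} has no attribute {attr!r}')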
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase :Dict = pytest.mark.integration
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        __magic_name__ : str = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
__magic_name__ : Union[str, Any] = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A )
__magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
__magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def __lowerCAmelCase ( self : Any ) -> str:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Tuple ) -> int:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__magic_name__ : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
__magic_name__ : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_A )
__magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
import faiss
__magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__magic_name__ : str = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Optional[int] = 1
__magic_name__ , __magic_name__ : str = index.search(_A )
self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1]
__magic_name__ , __magic_name__ : str = index.search_batch(_A )
self.assertRaises(_A , index.search_batch , queries[0] )
__magic_name__ : List[Any] = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _A )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
import faiss
__magic_name__ : str = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__magic_name__ : str = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_A ):
__magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
import faiss
__magic_name__ : Any = faiss.IndexFlat(5 )
__magic_name__ : Optional[Any] = FaissIndex(custom_index=_A )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : Dict ) -> Tuple:
import faiss
__magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
index.save(tmp_file.name )
__magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ : Dict = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Tuple = 1
__magic_name__ , __magic_name__ : Optional[Any] = index.search(_A )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase ( mockfs ):
    """simple docstring"""
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.floataa ) )
    index_name = 'index.faiss'
    path = f'mock://{index_name}'
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.floataa )
    query[1] = 1
    scores , indices = index.search(query )
assert scores[0] > 0
assert indices[0] == 1
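# For orientation, the raw faiss calls that `FaissIndex` wraps in the tests above
# amount to roughly this (a sketch of plain faiss usage, not the datasets API):
import faiss
import numpy as np

raw_index = faiss.IndexFlatIP(5)                    # inner-product metric, dimension 5
raw_index.add(np.eye(5, dtype=np.float32))          # five one-hot vectors
raw_query = np.zeros((1, 5), dtype=np.float32)
raw_query[0, 1] = 1.0
raw_scores, raw_indices = raw_index.search(raw_query, 1)
assert raw_indices[0, 0] == 1                       # nearest neighbour is row 1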
@require_elasticsearch
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> Dict:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : Any = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            __magic_name__ : Tuple = ElasticSearchIndex(es_client=_A )
            mocked_bulk.return_value = [(True, None)] * 3
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__magic_name__ : str = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__magic_name__ : str = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A )
__magic_name__ : Tuple = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A )
# batched queries with timeout
__magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Dict = index.search_batch(_A , request_timeout=30 )
__magic_name__ : Optional[int] = [scores[0] for scores in total_scores]
__magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A ) | 331 | 1 |
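# The `[(True, None)] * n` values mocked above mirror what
# `elasticsearch.helpers.streaming_bulk` actually yields: one `(ok, result)` tuple per
# indexed document. An unmocked sketch (host and index names are placeholders):
from elasticsearch import Elasticsearch
from elasticsearch.helpers import streaming_bulk

es = Elasticsearch('http://localhost:9200')
actions = ({'_index': 'my-index', '_source': {'text': t}} for t in ['foo', 'bar', 'foobar'])
for ok, result in streaming_bulk(es, actions):
    assert ok, result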
'''simple docstring'''
import os
def solution():
    """simple docstring"""
    file_path = os.path.join(os.path.dirname(__file__ ) , 'num.txt' )
    with open(file_path ) as file_hand:
        return str(sum(int(line ) for line in file_hand ) )[:10]
if __name__ == "__main__":
print(solution()) | 331 |
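# Sanity check of the idea above on a toy input: sum a few 50-digit numbers and keep
# the first ten digits of the total (the real num.txt holds one hundred 50-digit
# numbers from Project Euler problem 13):
toy_numbers = [10**49, 2 * 10**49, 3 * 10**49]
assert str(sum(toy_numbers))[:10] == '6000000000'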
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    """simple docstring"""
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    """simple docstring"""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    """simple docstring"""
    return EarlyStopping(
        monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
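# Typical wiring of the two factories above inside a Lightning training script
# (a sketch; the trainer arguments shown are illustrative):
def build_trainer(output_dir, metric='rouge2', patience=3):
    return pl.Trainer(
        callbacks=[
            get_checkpoint_callback(output_dir, metric),
            get_early_stopping_callback(metric, patience),
        ] )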
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int:
__magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None:
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
__magic_name__ : List[str] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
__magic_name__ : List[Any] = od / 'test_results.txt'
__magic_name__ : Dict = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
__magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A , 'a+' ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
__magic_name__ : Optional[Any] = metrics[key]
if isinstance(_A , torch.Tensor ):
__magic_name__ : Tuple = val.item()
__magic_name__ : int = F'{key}: {val:.6f}\n'
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
__magic_name__ : str = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_A )
@rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple:
try:
__magic_name__ : str = pl_module.model.model.num_parameters()
except AttributeError:
__magic_name__ : List[str] = pl_module.model.num_parameters()
__magic_name__ : List[Any] = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_A , _A , 'test' )
@rank_zero_only
def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 331 | 1 |
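# `@rank_zero_only` (used on the hooks above) turns a method into a no-op on every
# process except global rank 0, so files such as test_results.txt are written once per
# distributed run rather than once per GPU. A sketch of the underlying idea (the real
# decorator lives in pytorch_lightning.utilities; reading RANK is an assumption here):
import os
from functools import wraps

def rank_zero_only_sketch(fn):
    @wraps(fn)
    def wrapped(*args, **kwargs):
        if int(os.environ.get('RANK', 0)) == 0:  # only the rank-0 process does the work
            return fn(*args, **kwargs)
    return wrapped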
'''simple docstring'''
import unittest
from transformers.models.xlm_prophetnet.tokenization_xlm_prophetnet import SPIECE_UNDERLINE, XLMProphetNetTokenizer
from transformers.testing_utils import get_tests_dir, require_sentencepiece, slow
from transformers.utils import cached_property
from ...test_tokenization_common import TokenizerTesterMixin
lowerCAmelCase :Dict = get_tests_dir('''fixtures/test_sentencepiece.model''')
@require_sentencepiece
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[str] = XLMProphetNetTokenizer
A_ : List[Any] = False
A_ : str = True
def __lowerCAmelCase ( self : Dict ) -> List[Any]:
super().setUp()
# We have a SentencePiece fixture for testing
__magic_name__ : Optional[Any] = XLMProphetNetTokenizer(_A , keep_accents=_A )
tokenizer.save_pretrained(self.tmpdirname )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
__magic_name__ : Optional[Any] = '[PAD]'
__magic_name__ : Optional[Any] = 0
self.assertEqual(self.get_tokenizer()._convert_token_to_id(_A ) , _A )
self.assertEqual(self.get_tokenizer()._convert_id_to_token(_A ) , _A )
def __lowerCAmelCase ( self : List[str] ) -> int:
__magic_name__ : Any = list(self.get_tokenizer().get_vocab().keys() )
self.assertEqual(vocab_keys[0] , '[PAD]' )
self.assertEqual(vocab_keys[1] , '[CLS]' )
self.assertEqual(vocab_keys[-1] , 'j' )
self.assertEqual(len(_A ) , 1012 )
def __lowerCAmelCase ( self : Any ) -> Tuple:
self.assertEqual(self.get_tokenizer().vocab_size , 1012 )
def __lowerCAmelCase ( self : List[Any] ) -> List[str]:
__magic_name__ : Any = XLMProphetNetTokenizer(_A , keep_accents=_A )
__magic_name__ : List[str] = tokenizer.tokenize('This is a test' )
self.assertListEqual(_A , ['▁This', '▁is', '▁a', '▁t', 'est'] )
self.assertListEqual(
tokenizer.convert_tokens_to_ids(_A ) , [value + tokenizer.fairseq_offset for value in [285, 46, 10, 170, 382]] , )
__magic_name__ : List[Any] = tokenizer.tokenize('I was born in 92000, and this is falsé.' )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'9',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'é',
'.',
] , )
__magic_name__ : int = tokenizer.convert_tokens_to_ids(_A )
self.assertListEqual(
_A , [
value + tokenizer.fairseq_offset
for value in [8, 21, 84, 55, 24, 19, 7, -9, 602, 347, 347, 347, 3, 12, 66, 46, 72, 80, 6, -9, 4]
] , )
__magic_name__ : Optional[int] = tokenizer.convert_ids_to_tokens(_A )
self.assertListEqual(
_A , [
SPIECE_UNDERLINE + 'I',
SPIECE_UNDERLINE + 'was',
SPIECE_UNDERLINE + 'b',
'or',
'n',
SPIECE_UNDERLINE + 'in',
SPIECE_UNDERLINE + '',
'[UNK]',
'2',
'0',
'0',
'0',
',',
SPIECE_UNDERLINE + 'and',
SPIECE_UNDERLINE + 'this',
SPIECE_UNDERLINE + 'is',
SPIECE_UNDERLINE + 'f',
'al',
's',
'[UNK]',
'.',
] , )
@cached_property
def __lowerCAmelCase ( self : Any ) -> Optional[Any]:
return XLMProphetNetTokenizer.from_pretrained('microsoft/xprophetnet-large-wiki100-cased' )
@slow
def __lowerCAmelCase ( self : Tuple ) -> Any:
__magic_name__ : Optional[Any] = 'Hello World!'
__magic_name__ : List[Any] = [35389, 6672, 49, 2]
self.assertListEqual(_A , self.big_tokenizer.encode(_A ) )
@slow
def __lowerCAmelCase ( self : str ) -> Any:
# fmt: off
__magic_name__ : Dict = {'input_ids': [[11073, 82783, 18, 26, 82783, 549, 51540, 248, 17209, 1301, 217, 20, 215186, 1325, 147, 17209, 1301, 217, 20, 56370, 53, 122020, 20, 16477, 27, 87355, 4548, 20, 4728, 78392, 17, 159969, 18, 26, 24491, 629, 15, 538, 22704, 5439, 15, 2788, 24491, 9885, 15, 43534, 605, 15, 814, 18403, 33200, 29, 15, 43534, 24458, 12410, 111, 24966, 83669, 9637, 144068, 26, 850, 22346, 27, 147, 24966, 83669, 83490, 26, 39113, 735, 27, 689, 656, 2800, 1339, 4600, 53, 122020, 115785, 34, 816, 1339, 46887, 18, 147, 53905, 1951, 42238, 41170, 17732, 834, 436, 15, 27523, 98733, 217, 147, 5542, 4981, 930, 17347, 16, 2], [20091, 629, 94, 82786, 58, 490, 20, 1528, 84, 53905, 344, 80592, 110128, 18822, 5267, 1306, 62, 152537, 308, 7997, 401, 124427, 549, 35442, 225, 109, 15055, 25748, 147, 7119, 43712, 34, 767, 135366, 18, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [592, 63784, 119466, 17, 147808, 88214, 18, 656, 81, 32, 3296, 10280, 16, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]], 'attention_mask': [[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0]]} # noqa: E501
# fmt: on
self.tokenizer_integration_test_util(
expected_encoding=_A , model_name='microsoft/xprophetnet-large-wiki100-cased' , revision='1acad1643ddd54a44df6a1b797ada8373685d90e' , ) | 331 |
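# The SPIECE_UNDERLINE marker ('\u2581') asserted throughout the test above is how
# SentencePiece encodes a preceding space; joining pieces and mapping it back to a
# space recovers the original text. A toy round trip using pieces from the test:
pieces = ['\u2581This', '\u2581is', '\u2581a', '\u2581t', 'est']
detokenized = ''.join(pieces).replace('\u2581', ' ').strip()
assert detokenized == 'This is a test'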
'''simple docstring'''
def one_pence():
    """simple docstring"""
    return 1
def two_pence(x : int ):
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x : int ):
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x : int ):
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x : int ):
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x : int ):
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x : int ):
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x : int ):
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x : int = 200 ):
    """simple docstring"""
    return two_pound(x )
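# Worked example of the recursion above: each function either spends one more of its
# largest coin or falls through to the next-smaller denomination, so ten_pence(10)
# counts the 11 ways to make 10p from {1p, 2p, 5p, 10p}. The full solution(200) is
# Project Euler problem 31, whose documented answer is 73682.
assert ten_pence(10) == 11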
if __name__ == "__main__":
print(solution(int(input().strip()))) | 331 | 1 |
'''simple docstring'''
import argparse
from pathlib import Path
from transformers import AutoConfig, AutoTokenizer, RagConfig, RagSequenceForGeneration, RagTokenForGeneration
def consolidate(
    model_type : str ,
    generator_name_or_path : str ,
    question_encoder_name_or_path : str ,
    dest_dir : Path ,
    config_name_or_path : str = None ,
    generator_tokenizer_name_or_path : str = None ,
    question_encoder_tokenizer_name_or_path : str = None ,
):
    """simple docstring"""
    if config_name_or_path is None:
        config_name_or_path = 'facebook/rag-token-base' if model_type == 'rag_token' else 'facebook/rag-sequence-base'
    if generator_tokenizer_name_or_path is None:
        generator_tokenizer_name_or_path = generator_name_or_path
    if question_encoder_tokenizer_name_or_path is None:
        question_encoder_tokenizer_name_or_path = question_encoder_name_or_path
    model_class = RagTokenForGeneration if model_type == 'rag_token' else RagSequenceForGeneration
    # Save model.
    rag_config = RagConfig.from_pretrained(config_name_or_path )
    gen_config = AutoConfig.from_pretrained(generator_name_or_path )
    question_encoder_config = AutoConfig.from_pretrained(question_encoder_name_or_path )
    rag_config.generator = gen_config
    rag_config.question_encoder = question_encoder_config
    rag_model = model_class.from_pretrained_question_encoder_generator(
        question_encoder_name_or_path , generator_name_or_path , config=rag_config )
    rag_model.save_pretrained(dest_dir )
    # Sanity check.
    model_class.from_pretrained(dest_dir )
    # Save tokenizers.
    gen_tokenizer = AutoTokenizer.from_pretrained(generator_tokenizer_name_or_path )
    gen_tokenizer.save_pretrained(dest_dir / 'generator_tokenizer/' )
    question_encoder_tokenizer = AutoTokenizer.from_pretrained(question_encoder_tokenizer_name_or_path )
    question_encoder_tokenizer.save_pretrained(dest_dir / 'question_encoder_tokenizer/' )
if __name__ == "__main__":
lowerCAmelCase :Any = argparse.ArgumentParser()
parser.add_argument(
'''--model_type''',
choices=['''rag_sequence''', '''rag_token'''],
required=True,
type=str,
help='''RAG model type: rag_sequence, rag_token''',
)
parser.add_argument('''--dest''', type=str, required=True, help='''Path to the output checkpoint directory.''')
parser.add_argument('''--generator_name_or_path''', type=str, required=True, help='''Generator model identifier''')
parser.add_argument(
'''--question_encoder_name_or_path''', type=str, required=True, help='''Question encoder model identifier'''
)
parser.add_argument(
'''--generator_tokenizer_name_or_path''',
type=str,
help='''Generator tokenizer identifier, if not specified, resolves to ``generator_name_or_path``''',
)
parser.add_argument(
'''--question_encoder_tokenizer_name_or_path''',
type=str,
help='''Question encoder tokenizer identifier, if not specified, resolves to ``question_encoder_name_or_path``''',
)
parser.add_argument(
'''--config_name_or_path''',
type=str,
help=(
'''Identifier of the model config to use, if not provided, resolves to a base config for a given'''
''' ``model_type``'''
),
)
lowerCAmelCase :Any = parser.parse_args()
lowerCAmelCase :Any = Path(args.dest)
dest_dir.mkdir(exist_ok=True)
consolidate(
args.model_type,
args.generator_name_or_path,
args.question_encoder_name_or_path,
dest_dir,
args.config_name_or_path,
args.generator_tokenizer_name_or_path,
args.question_encoder_tokenizer_name_or_path,
) | 331 |
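# Example of driving the helper above directly from Python rather than the CLI (both
# model identifiers are real Hub checkpoints, chosen here purely for illustration):
consolidate(
    'rag_sequence',
    'facebook/bart-large-cnn',
    'facebook/dpr-question_encoder-single-nq-base',
    Path('./rag-checkpoint'),
)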
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Dict = ["""flax""", """transformers"""]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""flax""", """transformers"""]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] ) | 331 | 1 |
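# The dummy classes above exist so `from diffusers import X` still succeeds when flax
# and transformers are missing, and only fails with an installation hint once the
# object is actually constructed or loaded. A minimal sketch of the mechanism
# (`_backend_available` is a hypothetical stand-in for the real availability checks):
import importlib.util

def _backend_available(backend):
    return importlib.util.find_spec(backend) is not None

def requires_backends_sketch(obj, backends):
    missing = [b for b in backends if not _backend_available(b)]
    if missing:
        name = obj.__name__ if isinstance(obj, type) else type(obj).__name__
        raise ImportError(f'{name} requires the following backends: {", ".join(missing)}')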
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import AutoTokenizer, MBartConfig, is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import TFAutoModelForSeqaSeqLM, TFMBartForConditionalGeneration, TFMBartModel
@require_tf
class _lowerCamelCase :
'''simple docstring'''
A_ : Dict = MBartConfig
A_ : Any = {}
A_ : str = """gelu"""
def __init__( self : Optional[Any] , _A : int , _A : int=13 , _A : Optional[int]=7 , _A : Union[str, Any]=True , _A : Dict=False , _A : Optional[Any]=99 , _A : Dict=32 , _A : Any=2 , _A : Tuple=4 , _A : List[str]=37 , _A : List[str]=0.1 , _A : Union[str, Any]=0.1 , _A : List[Any]=20 , _A : str=2 , _A : int=1 , _A : int=0 , ) -> List[str]:
__magic_name__ : List[Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : Union[str, Any] = is_training
__magic_name__ : Optional[int] = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : Tuple = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Tuple = num_attention_heads
__magic_name__ : Optional[int] = intermediate_size
__magic_name__ : Any = hidden_dropout_prob
__magic_name__ : Dict = attention_probs_dropout_prob
__magic_name__ : Optional[Any] = max_position_embeddings
__magic_name__ : str = eos_token_id
__magic_name__ : str = pad_token_id
__magic_name__ : Dict = bos_token_id
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length - 1] , self.vocab_size )
__magic_name__ : str = tf.expand_dims(tf.constant([self.eos_token_id] * self.batch_size ) , 1 )
__magic_name__ : List[str] = tf.concat([input_ids, eos_tensor] , axis=1 )
__magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Tuple = self.config_cls(
vocab_size=self.vocab_size , d_model=self.hidden_size , encoder_layers=self.num_hidden_layers , decoder_layers=self.num_hidden_layers , encoder_attention_heads=self.num_attention_heads , decoder_attention_heads=self.num_attention_heads , encoder_ffn_dim=self.intermediate_size , decoder_ffn_dim=self.intermediate_size , dropout=self.hidden_dropout_prob , attention_dropout=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , eos_token_ids=[2] , bos_token_id=self.bos_token_id , pad_token_id=self.pad_token_id , decoder_start_token_id=self.pad_token_id , **self.config_updates , )
__magic_name__ : Any = prepare_mbart_inputs_dict(_A , _A , _A )
return config, inputs_dict
def __lowerCAmelCase ( self : Optional[Any] , _A : List[Any] , _A : Tuple ) -> Dict:
__magic_name__ : Optional[int] = TFMBartModel(config=_A ).get_decoder()
__magic_name__ : Any = inputs_dict['input_ids']
__magic_name__ : int = input_ids[:1, :]
__magic_name__ : Dict = inputs_dict['attention_mask'][:1, :]
__magic_name__ : Union[str, Any] = inputs_dict['head_mask']
__magic_name__ : str = 1
# first forward pass
__magic_name__ : Tuple = model(_A , attention_mask=_A , head_mask=_A , use_cache=_A )
__magic_name__ , __magic_name__ : List[Any] = outputs.to_tuple()
__magic_name__ : Tuple = past_key_values[1]
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : Dict , lowerCAmelCase : Tuple , lowerCAmelCase : Dict=None , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : List[str]=None , lowerCAmelCase : Any=None , ):
"""simple docstring"""
if attention_mask is None:
__magic_name__ : int = tf.cast(tf.math.not_equal(lowerCAmelCase , config.pad_token_id ) , tf.inta )
if decoder_attention_mask is None:
__magic_name__ : Tuple = tf.concat(
[
tf.ones(decoder_input_ids[:, :1].shape , dtype=tf.inta ),
tf.cast(tf.math.not_equal(decoder_input_ids[:, 1:] , config.pad_token_id ) , tf.inta ),
] , axis=-1 , )
if head_mask is None:
__magic_name__ : Tuple = tf.ones((config.encoder_layers, config.encoder_attention_heads) )
if decoder_head_mask is None:
__magic_name__ : Union[str, Any] = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
if cross_attn_head_mask is None:
__magic_name__ : str = tf.ones((config.decoder_layers, config.decoder_attention_heads) )
return {
"input_ids": input_ids,
"decoder_input_ids": decoder_input_ids,
"attention_mask": attention_mask,
"decoder_attention_mask": decoder_attention_mask,
"head_mask": head_mask,
"decoder_head_mask": decoder_head_mask,
"cross_attn_head_mask": cross_attn_head_mask,
}
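# A toy check of the decoder-mask construction above: the first (decoder-start)
# position is always attended, every later pad position is masked out (pad_token_id=1
# matches the tester's MBart convention):
_toy_decoder_ids = tf.constant([[2, 5, 1, 1]])
_toy_mask = tf.concat(
    [
        tf.ones(_toy_decoder_ids[:, :1].shape, dtype=tf.int8),
        tf.cast(tf.math.not_equal(_toy_decoder_ids[:, 1:], 1), tf.int8),
    ],
    axis=-1,
)
# -> [[1, 1, 0, 0]]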
@require_tf
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = (TFMBartForConditionalGeneration, TFMBartModel) if is_tf_available() else ()
A_ : Dict = (TFMBartForConditionalGeneration,) if is_tf_available() else ()
A_ : List[str] = (
{
"""conversational""": TFMBartForConditionalGeneration,
"""feature-extraction""": TFMBartModel,
"""summarization""": TFMBartForConditionalGeneration,
"""text2text-generation""": TFMBartForConditionalGeneration,
"""translation""": TFMBartForConditionalGeneration,
}
if is_tf_available()
else {}
)
A_ : Tuple = True
A_ : str = False
A_ : Dict = False
def __lowerCAmelCase ( self : Tuple , _A : Union[str, Any] , _A : Tuple , _A : Optional[Any] , _A : List[str] , _A : Union[str, Any] ) -> List[Any]:
if pipeline_test_casse_name != "FeatureExtractionPipelineTests":
# Exception encountered when calling layer '...'
return True
return False
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : List[str] = TFMBartModelTester(self )
__magic_name__ : List[str] = ConfigTester(self , config_class=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : str ) -> List[str]:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs_for_common()
self.model_tester.check_decoder_model_past_large_inputs(*_A )
@require_sentencepiece
@require_tokenizers
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = [
""" UN Chief Says There Is No Military Solution in Syria""",
]
A_ : List[str] = [
"""Şeful ONU declară că nu există o soluţie militară în Siria""",
]
A_ : int = """facebook/mbart-large-en-ro"""
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ) -> str:
return AutoTokenizer.from_pretrained(self.model_name )
@cached_property
def __lowerCAmelCase ( self : Optional[int] ) -> Tuple:
__magic_name__ : Optional[Any] = TFAutoModelForSeqaSeqLM.from_pretrained(self.model_name )
return model
def __lowerCAmelCase ( self : Any , **_A : Union[str, Any] ) -> Dict:
__magic_name__ : Optional[Any] = self.translate_src_text(**_A )
self.assertListEqual(self.expected_text , _A )
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> int:
__magic_name__ : List[Any] = self.tokenizer(self.src_text , **_A , return_tensors='tf' )
__magic_name__ : int = self.model.generate(
model_inputs.input_ids , attention_mask=model_inputs.attention_mask , num_beams=2 )
__magic_name__ : Any = self.tokenizer.batch_decode(_A , skip_special_tokens=_A )
return generated_words
@slow
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
self._assert_generated_batch_equal_expected() | 331 |
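# For reference, the en->ro checkpoint used above expects the source language to be
# set on the tokenizer before encoding (a sketch; 'en_XX' is MBart's English code):
from transformers import AutoTokenizer

mbart_tokenizer = AutoTokenizer.from_pretrained('facebook/mbart-large-en-ro', src_lang='en_XX')
batch = mbart_tokenizer(['UN Chief Says There Is No Military Solution in Syria'], return_tensors='tf')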
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase :Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any:
super().__init__(*_A , **_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
def __lowerCAmelCase ( self : str , _A : Any=None , _A : Union[str, Any]=None , _A : Union[str, Any]=None ) -> List[str]:
__magic_name__ : Union[str, Any] = {}
__magic_name__ : Optional[Any] = {}
if prompt is not None:
__magic_name__ : Union[str, Any] = prompt
if generate_kwargs is not None:
__magic_name__ : str = generate_kwargs
if max_new_tokens is not None:
if "generate_kwargs" not in forward_kwargs:
__magic_name__ : Union[str, Any] = {}
if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
raise ValueError(
'\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
' please use only one' )
__magic_name__ : Optional[Any] = max_new_tokens
return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict:
__magic_name__ : List[Any] = load_image(_A )
if prompt is not None:
if not isinstance(_A , _A ):
raise ValueError(
F'Received an invalid text input, got - {type(_A )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
__magic_name__ : Any = self.model.config.model_type
if model_type == "git":
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids
__magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids
__magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework )
model_inputs.update(_A )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
__magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__magic_name__ : int = None
return model_inputs
def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
# pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _A )
and all(x is None for x in model_inputs['input_ids'] )
):
__magic_name__ : str = None
if generate_kwargs is None:
__magic_name__ : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name )
__magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A )
return model_outputs
def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]:
__magic_name__ : Optional[Any] = []
for output_ids in model_outputs:
__magic_name__ : Union[str, Any] = {
'generated_text': self.tokenizer.decode(
_A , skip_special_tokens=_A , )
}
records.append(_A )
return records | 331 | 1 |
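# Typical end-user invocation of the pipeline implemented above (the checkpoint is a
# real image-captioning model on the Hub, used here purely as an example):
from transformers import pipeline

captioner = pipeline('image-to-text', model='nlpconnect/vit-gpt2-image-captioning')
captions = captioner('https://huggingface.co/datasets/Narsil/image_dummy/raw/main/parrots.png')
# -> [{'generated_text': '...'}]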
'''simple docstring'''
import copy
from typing import TYPE_CHECKING, Any, Mapping, Optional, OrderedDict
from packaging import version
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
from ..auto.configuration_auto import AutoConfig
if TYPE_CHECKING:
from ... import PreTrainedTokenizerBase, TensorType
lowerCAmelCase :str = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = """vision-encoder-decoder"""
A_ : Any = True
def __init__( self : int , **_A : Optional[int] ) -> Any:
super().__init__(**_A )
if "encoder" not in kwargs or "decoder" not in kwargs:
raise ValueError(
                F'A configuration of type {self.model_type} cannot be instantiated because '
F'not both `encoder` and `decoder` sub-configurations are passed, but only {kwargs}' )
__magic_name__ : List[Any] = kwargs.pop('encoder' )
__magic_name__ : int = encoder_config.pop('model_type' )
__magic_name__ : Dict = kwargs.pop('decoder' )
__magic_name__ : Dict = decoder_config.pop('model_type' )
__magic_name__ : str = AutoConfig.for_model(_A , **_A )
__magic_name__ : str = AutoConfig.for_model(_A , **_A )
__magic_name__ : List[Any] = True
@classmethod
def __lowerCAmelCase ( cls : List[str] , _A : PretrainedConfig , _A : PretrainedConfig , **_A : str ) -> PretrainedConfig:
logger.info('Setting `config.is_decoder=True` and `config.add_cross_attention=True` for decoder_config' )
__magic_name__ : Tuple = True
__magic_name__ : Optional[int] = True
return cls(encoder=encoder_config.to_dict() , decoder=decoder_config.to_dict() , **_A )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
__magic_name__ : str = copy.deepcopy(self.__dict__ )
__magic_name__ : Dict = self.encoder.to_dict()
__magic_name__ : Tuple = self.decoder.to_dict()
__magic_name__ : str = self.__class__.model_type
return output
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[str] = version.parse("""1.11""" )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict(
[
('pixel_values', {0: 'batch', 1: 'num_channels', 2: 'height', 3: 'width'}),
] )
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> float:
return 1E-4
@property
def __lowerCAmelCase ( self : Tuple ) -> Mapping[str, Mapping[int, str]]:
return OrderedDict({'last_hidden_state': {0: 'batch', 1: 'encoder_sequence'}} )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : int ) -> Mapping[str, Mapping[int, str]]:
__magic_name__ : Union[str, Any] = OrderedDict()
__magic_name__ : Tuple = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__magic_name__ : List[Any] = {0: 'batch', 1: 'past_decoder_sequence + sequence'}
__magic_name__ : List[str] = {0: 'batch', 1: 'encoder_sequence'}
return common_inputs
def __lowerCAmelCase ( self : str , _A : "PreTrainedTokenizerBase" , _A : int = -1 , _A : int = -1 , _A : bool = False , _A : Optional["TensorType"] = None , ) -> Mapping[str, Any]:
import torch
__magic_name__ : Optional[int] = OrderedDict()
__magic_name__ : List[Any] = super().generate_dummy_inputs(
_A , batch_size=_A , seq_length=_A , is_pair=_A , framework=_A )
__magic_name__ , __magic_name__ : int = dummy_input['input_ids'].shape
__magic_name__ : str = (batch, encoder_sequence, self._config.encoder_hidden_size)
__magic_name__ : List[Any] = dummy_input.pop('input_ids' )
__magic_name__ : Optional[Any] = dummy_input.pop('attention_mask' )
__magic_name__ : Any = torch.zeros(_A )
return common_inputs
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
@property
def __lowerCAmelCase ( self : Optional[Any] ) -> None:
pass
def __lowerCAmelCase ( self : List[str] , _A : PretrainedConfig ) -> OnnxConfig:
return VisionEncoderDecoderEncoderOnnxConfig(_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : PretrainedConfig , _A : PretrainedConfig , _A : str = "default" ) -> OnnxConfig:
__magic_name__ : Union[str, Any] = encoder_config.hidden_size
return VisionEncoderDecoderDecoderOnnxConfig(_A , _A )
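# A short sketch of the composite-config pattern implemented above:
# `AutoConfig.for_model` rebuilds each sub-config from its `model_type`
# string, and the decoder is flagged for cross-attention. The model types
# and sizes below are illustrative assumptions.
from transformers import AutoConfig

encoder_config = AutoConfig.for_model('vit', image_size=224)
decoder_config = AutoConfig.for_model('gpt2', n_layer=6)
decoder_config.is_decoder = True
decoder_config.add_cross_attention = True
print(encoder_config.model_type, decoder_config.model_type)  # vit gpt2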
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCAmelCase :Dict = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
lowerCAmelCase :str = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCAmelCase :Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys())
lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class _lowerCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple:
__magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.model
__magic_name__ : int = ['bias', 'LayerNorm.weight']
__magic_name__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ : str = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
__magic_name__ : Tuple = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ : List[str] = optimizer
__magic_name__ : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]:
return self.validation_step(_A , _A )
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any:
return self.validation_end(_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str:
if stage == "test":
__magic_name__ : Any = len(self.test_dataloader().dataset )
else:
__magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A )
__magic_name__ : int = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]:
raise NotImplementedError('You must implement this for your task' )
def __lowerCAmelCase ( self : int ) -> List[str]:
return self.train_loader
def __lowerCAmelCase ( self : Tuple ) -> int:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None:
__magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' )
__magic_name__ : List[Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple:
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A )
parser.add_argument('--train_batch_size' , default=32 , type=_A )
parser.add_argument('--eval_batch_size' , default=32 , type=_A )
parser.add_argument('--adafactor' , action='store_true' )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning releases the accelerator classes were removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler']
__magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]:
rank_zero_info('***** Validation results *****' )
__magic_name__ : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]:
rank_zero_info('***** Test results *****' )
__magic_name__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
__magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_A , 'w' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__magic_name__ : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
__magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase )
if logging_callback is None:
__magic_name__ : Dict = LoggingCallback()
__magic_name__ : List[str] = {}
if args.fpaa:
__magic_name__ : Dict = 16
if args.gpus > 1:
__magic_name__ : Tuple = 'auto'
__magic_name__ : int = 'ddp'
__magic_name__ : str = args.accumulate_grad_batches
__magic_name__ : str = None
__magic_name__ : List[str] = 'auto'
__magic_name__ : List[Any] = pl.Trainer.from_argparse_args(
lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , )
if args.do_train:
trainer.fit(lowerCAmelCase )
else:
print('RAG modeling tests with new set functions successfully executed!' )
return trainer
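# A standalone sketch of the weight-decay grouping used in
# `configure_optimizers` above: parameters whose names contain 'bias' or
# 'LayerNorm.weight' get zero decay. The toy module is an illustrative
# assumption; the rest is plain PyTorch.
import torch
from torch import nn

class Toy(nn.Module):
    def __init__(self) -> None:
        super().__init__()
        self.dense = nn.Linear(8, 8)
        self.LayerNorm = nn.LayerNorm(8)

model = Toy()
no_decay = ['bias', 'LayerNorm.weight']
grouped_parameters = [
    {
        'params': [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
        'weight_decay': 0.01,
    },
    {
        'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
        'weight_decay': 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5)
print([len(g['params']) for g in grouped_parameters])  # [1, 3]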
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
if not isinstance(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : int = f'Input value of [number={number}] must be an integer'
raise TypeError(lowerCAmelCase )
if number < 0:
return False
__magic_name__ : Any = number * number
while number > 0:
if number % 10 != number_square % 10:
return False
number //= 10
number_square //= 10
return True
if __name__ == "__main__":
import doctest
doctest.testmod()
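# Sanity check of the digit-matching loop above, rewritten with a
# descriptive name since this sample obfuscates identifiers: a number is
# "automorphic" when its square ends in the number itself (25 -> 625).
def is_automorphic(number: int) -> bool:
    if number < 0:
        return False
    square = number * number
    while number > 0:
        if number % 10 != square % 10:
            return False
        number //= 10
        square //= 10
    return True

assert [n for n in range(100) if is_automorphic(n)] == [0, 1, 5, 6, 25, 76]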
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = (DDPMScheduler,)
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
__magic_name__ : str = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_A , msg=f'`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
scheduler.set_timesteps(timesteps=_A )
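# A minimal sketch of the denoising loop these scheduler tests exercise,
# using the public diffusers DDPMScheduler API. The random tensor stands in
# for a trained UNet's noise prediction, so the output is noise too; shapes
# and step counts are illustrative assumptions.
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=10)
sample = torch.randn(1, 3, 8, 8)
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    noise_pred = torch.randn(1, 3, 8, 8)  # a real pipeline calls the UNet here
    sample = scheduler.step(noise_pred, t, sample, generator=generator).prev_sample
print(sample.shape)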
'''simple docstring'''
import inspect
import tempfile
from collections import OrderedDict, UserDict
from collections.abc import MutableMapping
from contextlib import ExitStack, contextmanager
from dataclasses import fields
from enum import Enum
from typing import Any, ContextManager, List, Tuple
import numpy as np
from .import_utils import is_flax_available, is_tf_available, is_torch_available, is_torch_fx_proxy
if is_flax_available():
import jax.numpy as jnp
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __get__( self : Tuple , _A : Optional[int] , _A : Any=None ) -> List[Any]:
# See docs.python.org/3/howto/descriptor.html#properties
if obj is None:
return self
if self.fget is None:
raise AttributeError('unreadable attribute' )
__magic_name__ : List[Any] = '__cached_' + self.fget.__name__
__magic_name__ : List[str] = getattr(_A , _A , _A )
if cached is None:
__magic_name__ : Union[str, Any] = self.fget(_A )
setattr(_A , _A , _A )
return cached
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = val.lower()
if val in {"y", "yes", "t", "true", "on", "1"}:
return 1
if val in {"n", "no", "f", "false", "off", "0"}:
return 0
raise ValueError(f'invalid truth value {val!r}' )
def lowerCamelCase ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
if is_torch_fx_proxy(lowerCAmelCase ):
return True
if is_torch_available():
import torch
if isinstance(lowerCAmelCase , torch.Tensor ):
return True
if is_tf_available():
import tensorflow as tf
if isinstance(lowerCAmelCase , tf.Tensor ):
return True
if is_flax_available():
import jax.numpy as jnp
from jax.core import Tracer
if isinstance(lowerCAmelCase , (jnp.ndarray, Tracer) ):
return True
return isinstance(lowerCAmelCase , np.ndarray )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
return isinstance(lowerCAmelCase , np.ndarray )
def lowerCamelCase ( lowerCAmelCase : Optional[int] ):
"""simple docstring"""
return _is_numpy(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
import torch
return isinstance(lowerCAmelCase , torch.Tensor )
def lowerCamelCase ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[str] ):
"""simple docstring"""
import torch
return isinstance(lowerCAmelCase , torch.device )
def lowerCamelCase ( lowerCAmelCase : List[str] ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_device(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
import torch
if isinstance(lowerCAmelCase , lowerCAmelCase ):
if hasattr(lowerCAmelCase , lowerCAmelCase ):
__magic_name__ : Dict = getattr(lowerCAmelCase , lowerCAmelCase )
else:
return False
return isinstance(lowerCAmelCase , torch.dtype )
def lowerCamelCase ( lowerCAmelCase : Dict ):
"""simple docstring"""
return False if not is_torch_available() else _is_torch_dtype(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : str ):
"""simple docstring"""
import tensorflow as tf
return isinstance(lowerCAmelCase , tf.Tensor )
def lowerCamelCase ( lowerCAmelCase : Dict ):
"""simple docstring"""
return False if not is_tf_available() else _is_tensorflow(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Any ):
"""simple docstring"""
import tensorflow as tf
# the `is_symbolic_tensor` predicate is only available starting with TF 2.14
if hasattr(lowerCAmelCase , 'is_symbolic_tensor' ):
return tf.is_symbolic_tensor(lowerCAmelCase )
return type(lowerCAmelCase ) == tf.Tensor
def lowerCamelCase ( lowerCAmelCase : List[Any] ):
"""simple docstring"""
return False if not is_tf_available() else _is_tf_symbolic_tensor(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : List[str] ):
"""simple docstring"""
import jax.numpy as jnp # noqa: F811
return isinstance(lowerCAmelCase , jnp.ndarray )
def lowerCamelCase ( lowerCAmelCase : Any ):
"""simple docstring"""
return False if not is_flax_available() else _is_jax(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if isinstance(lowerCAmelCase , (dict, UserDict) ):
return {k: to_py_obj(lowerCAmelCase ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase , (list, tuple) ):
return [to_py_obj(lowerCAmelCase ) for o in obj]
elif is_tf_tensor(lowerCAmelCase ):
return obj.numpy().tolist()
elif is_torch_tensor(lowerCAmelCase ):
return obj.detach().cpu().tolist()
elif is_jax_tensor(lowerCAmelCase ):
return np.asarray(lowerCAmelCase ).tolist()
elif isinstance(lowerCAmelCase , (np.ndarray, np.number) ): # tolist also works on 0d np arrays
return obj.tolist()
else:
return obj
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if isinstance(lowerCAmelCase , (dict, UserDict) ):
return {k: to_numpy(lowerCAmelCase ) for k, v in obj.items()}
elif isinstance(lowerCAmelCase , (list, tuple) ):
return np.array(lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
return obj.numpy()
elif is_torch_tensor(lowerCAmelCase ):
return obj.detach().cpu().numpy()
elif is_jax_tensor(lowerCAmelCase ):
return np.asarray(lowerCAmelCase )
else:
return obj
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] ) -> Dict:
__magic_name__ : Optional[Any] = fields(self )
# Safety and consistency checks
if not len(_A ):
raise ValueError(F'{self.__class__.__name__} has no fields.' )
if not all(field.default is None for field in class_fields[1:] ):
raise ValueError(F'{self.__class__.__name__} should not have more than one required field.' )
__magic_name__ : List[Any] = getattr(self , class_fields[0].name )
__magic_name__ : Dict = all(getattr(self , field.name ) is None for field in class_fields[1:] )
if other_fields_are_none and not is_tensor(_A ):
if isinstance(_A , _A ):
__magic_name__ : Union[str, Any] = first_field.items()
__magic_name__ : Tuple = True
else:
try:
__magic_name__ : Optional[Any] = iter(_A )
__magic_name__ : List[str] = True
except TypeError:
__magic_name__ : str = False
# if we provided an iterator as first field and the iterator is a (key, value) iterator
# set the associated fields
if first_field_iterator:
for idx, element in enumerate(_A ):
if (
not isinstance(_A , (list, tuple) )
or not len(_A ) == 2
or not isinstance(element[0] , _A )
):
if idx == 0:
# If we do not have an iterator of key/values, set it as attribute
__magic_name__ : Any = first_field
else:
# If we have a mixed iterator, raise an error
raise ValueError(
F'Cannot set key/value for {element}. It needs to be a tuple (key, value).' )
break
setattr(self , element[0] , element[1] )
if element[1] is not None:
__magic_name__ : Tuple = element[1]
elif first_field is not None:
__magic_name__ : Union[str, Any] = first_field
else:
for field in class_fields:
__magic_name__ : Union[str, Any] = getattr(self , field.name )
if v is not None:
__magic_name__ : int = v
def __delitem__( self : Any , *_A : List[str] , **_A : Any ) -> Optional[Any]:
raise Exception(F'You cannot use ``__delitem__`` on a {self.__class__.__name__} instance.' )
def __lowerCAmelCase ( self : List[Any] , *_A : Dict , **_A : Optional[int] ) -> Optional[Any]:
raise Exception(F'You cannot use ``setdefault`` on a {self.__class__.__name__} instance.' )
def __lowerCAmelCase ( self : Dict , *_A : Dict , **_A : List[str] ) -> Dict:
raise Exception(F'You cannot use ``pop`` on a {self.__class__.__name__} instance.' )
def __lowerCAmelCase ( self : int , *_A : Any , **_A : List[str] ) -> Union[str, Any]:
raise Exception(F'You cannot use ``update`` on a {self.__class__.__name__} instance.' )
def __getitem__( self : List[str] , _A : Dict ) -> Dict:
if isinstance(_A , _A ):
__magic_name__ : str = dict(self.items() )
return inner_dict[k]
else:
return self.to_tuple()[k]
def __setattr__( self : Optional[int] , _A : Any , _A : Any ) -> List[Any]:
if name in self.keys() and value is not None:
# Don't call self.__setitem__ to avoid recursion errors
super().__setitem__(_A , _A )
super().__setattr__(_A , _A )
def __setitem__( self : List[str] , _A : str , _A : Optional[Any] ) -> List[str]:
# Will raise a KeyException if needed
super().__setitem__(_A , _A )
# Don't call self.__setattr__ to avoid recursion errors
super().__setattr__(_A , _A )
def __lowerCAmelCase ( self : List[str] ) -> Tuple[Any]:
return tuple(self[k] for k in self.keys() )
class _lowerCamelCase ( lowercase__ , lowercase__ ):
'''simple docstring'''
@classmethod
def __lowerCAmelCase ( cls : Dict , _A : int ) -> List[Any]:
raise ValueError(
F'{value} is not a valid {cls.__name__}, please select one of {list(cls._valueamember_map_.keys() )}' )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = """longest"""
A_ : Tuple = """max_length"""
A_ : Tuple = """do_not_pad"""
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : str = """pt"""
A_ : Union[str, Any] = """tf"""
A_ : Optional[int] = """np"""
A_ : Optional[int] = """jax"""
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : str , _A : List[ContextManager] ) -> Any:
__magic_name__ : Optional[int] = context_managers
__magic_name__ : Any = ExitStack()
def __enter__( self : Tuple ) -> List[str]:
for context_manager in self.context_managers:
self.stack.enter_context(_A )
def __exit__( self : Any , *_A : Optional[int] , **_A : Optional[int] ) -> str:
self.stack.__exit__(*_A , **_A )
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
__magic_name__ : Dict = infer_framework(lowerCAmelCase )
if framework == "tf":
__magic_name__ : Tuple = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ : int = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ : Tuple = inspect.signature(model_class.__call__ ) # Flax models
for p in signature.parameters:
if p == "return_loss" and signature.parameters[p].default is True:
return True
return False
def lowerCamelCase ( lowerCAmelCase : Dict ):
"""simple docstring"""
__magic_name__ : Dict = model_class.__name__
__magic_name__ : List[str] = infer_framework(lowerCAmelCase )
if framework == "tf":
__magic_name__ : Any = inspect.signature(model_class.call ) # TensorFlow models
elif framework == "pt":
__magic_name__ : Any = inspect.signature(model_class.forward ) # PyTorch models
else:
__magic_name__ : int = inspect.signature(model_class.__call__ ) # Flax models
if "QuestionAnswering" in model_name:
return [p for p in signature.parameters if "label" in p or p in ("start_positions", "end_positions")]
else:
return [p for p in signature.parameters if "label" in p]
def lowerCamelCase ( lowerCAmelCase : MutableMapping , lowerCAmelCase : str = "" , lowerCAmelCase : str = "." ):
"""simple docstring"""
def _flatten_dict(lowerCAmelCase : int , lowerCAmelCase : Dict="" , lowerCAmelCase : Dict="." ):
for k, v in d.items():
__magic_name__ : Optional[Any] = str(lowerCAmelCase ) + delimiter + str(lowerCAmelCase ) if parent_key else k
if v and isinstance(lowerCAmelCase , lowerCAmelCase ):
yield from flatten_dict(lowerCAmelCase , lowerCAmelCase , delimiter=lowerCAmelCase ).items()
else:
yield key, v
return dict(_flatten_dict(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase ) )
@contextmanager
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : bool = False ):
"""simple docstring"""
if use_temp_dir:
with tempfile.TemporaryDirectory() as tmp_dir:
yield tmp_dir
else:
yield working_dir
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : List[str]=None ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase ):
return np.transpose(lowerCAmelCase , axes=lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.T if axes is None else array.permute(*lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.transpose(lowerCAmelCase , perm=lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return jnp.transpose(lowerCAmelCase , axes=lowerCAmelCase )
else:
raise ValueError(f'Type not supported for transpose: {type(lowerCAmelCase )}.' )
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : int ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase ):
return np.reshape(lowerCAmelCase , lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.reshape(*lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.reshape(lowerCAmelCase , lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return jnp.reshape(lowerCAmelCase , lowerCAmelCase )
else:
raise ValueError(f'Type not supported for reshape: {type(lowerCAmelCase )}.' )
def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Any=None ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase ):
return np.squeeze(lowerCAmelCase , axis=lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.squeeze() if axis is None else array.squeeze(dim=lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.squeeze(lowerCAmelCase , axis=lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return jnp.squeeze(lowerCAmelCase , axis=lowerCAmelCase )
else:
raise ValueError(f'Type not supported for squeeze: {type(lowerCAmelCase )}.' )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : List[Any] ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase ):
return np.expand_dims(lowerCAmelCase , lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.unsqueeze(dim=lowerCAmelCase )
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.expand_dims(lowerCAmelCase , axis=lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return jnp.expand_dims(lowerCAmelCase , axis=lowerCAmelCase )
else:
raise ValueError(f'Type not supported for expand_dims: {type(lowerCAmelCase )}.' )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
if is_numpy_array(lowerCAmelCase ):
return np.size(lowerCAmelCase )
elif is_torch_tensor(lowerCAmelCase ):
return array.numel()
elif is_tf_tensor(lowerCAmelCase ):
import tensorflow as tf
return tf.size(lowerCAmelCase )
elif is_jax_tensor(lowerCAmelCase ):
return array.size
else:
raise ValueError(f'Type not supported for tensor_size: {type(lowerCAmelCase )}.' )
def lowerCamelCase ( lowerCAmelCase : Any , lowerCAmelCase : Any ):
"""simple docstring"""
for key, value in auto_map.items():
if isinstance(lowerCAmelCase , (tuple, list) ):
__magic_name__ : str = [f'{repo_id}--{v}' if (v is not None and '--' not in v) else v for v in value]
elif value is not None and "--" not in value:
__magic_name__ : List[Any] = f'{repo_id}--{value}'
return auto_map
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
for base_class in inspect.getmro(lowerCAmelCase ):
__magic_name__ : str = base_class.__module__
__magic_name__ : int = base_class.__name__
if module.startswith('tensorflow' ) or module.startswith('keras' ) or name == "TFPreTrainedModel":
return "tf"
elif module.startswith('torch' ) or name == "PreTrainedModel":
return "pt"
elif module.startswith('flax' ) or module.startswith('jax' ) or name == "FlaxPreTrainedModel":
return "flax"
else:
raise TypeError(f'Could not infer framework from class {model_class}.' )
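# The helpers above route each call to numpy/torch/tensorflow/jax based on
# the tensor's type. A tiny self-contained illustration of that dispatch
# idea, limited to the NumPy branch (the function name is ours, not from
# the library):
import numpy as np

def transpose_any(array, axes=None):
    if isinstance(array, np.ndarray):
        return np.transpose(array, axes=axes)
    raise ValueError(f'Type not supported for transpose: {type(array)}.')

assert transpose_any(np.arange(6).reshape(2, 3)).shape == (3, 2)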
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = IFInpaintingPipeline
A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
return self._get_dummy_components()
def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]:
if str(_A ).startswith('mps' ):
__magic_name__ : Optional[Any] = torch.manual_seed(_A )
else:
__magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __lowerCAmelCase ( self : Dict ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self : Tuple ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
self._test_save_load_local()
def __lowerCAmelCase ( self : Any ) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
'''simple docstring'''
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : list[float] ):
"""simple docstring"""
if discount_rate < 0:
raise ValueError('Discount rate cannot be negative' )
if not cash_flows:
raise ValueError('Cash flows list cannot be empty' )
__magic_name__ : Optional[Any] = sum(
cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(lowerCAmelCase ) )
return round(lowerCAmelCase , ndigits=2 )
if __name__ == "__main__":
import doctest
doctest.testmod()
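# Worked check of the net-present-value helper above (reimplemented with a
# descriptive name, since identifiers in this sample are obfuscated): an
# outlay of 1000 followed by three inflows of 500 at a 10% rate gives
# -1000 + 500/1.1 + 500/1.1**2 + 500/1.1**3 ≈ 243.43.
def net_present_value(discount_rate: float, cash_flows: list[float]) -> float:
    if discount_rate < 0:
        raise ValueError('Discount rate cannot be negative')
    if not cash_flows:
        raise ValueError('Cash flows list cannot be empty')
    present_value = sum(
        cash_flow / ((1 + discount_rate) ** i) for i, cash_flow in enumerate(cash_flows)
    )
    return round(present_value, ndigits=2)

assert net_present_value(0.10, [-1000, 500, 500, 500]) == 243.43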
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Optional[Any] = use_input_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : str = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : str = pos_att_type
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : int = None
if self.use_token_type_ids:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.get_config()
__magic_name__ : Union[str, Any] = 300
return config
def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]:
__magic_name__ : Dict = DebertaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0]
__magic_name__ : List[str] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict:
__magic_name__ : List[str] = DebertaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]:
__magic_name__ : str = self.num_labels
__magic_name__ : int = DebertaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : int = DebertaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : int = config_and_inputs
__magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = True
A_ : Any = False
A_ : Dict = False
A_ : str = False
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : List[str] = DebertaModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int = DebertaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
__magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
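# The slow integration test above, in script form: load the public
# checkpoint, run a forward pass, and inspect the hidden states. Requires
# network access to fetch `microsoft/deberta-base`; the prompt is arbitrary.
import torch
from transformers import AutoTokenizer, DebertaModel

tokenizer = AutoTokenizer.from_pretrained('microsoft/deberta-base')
model = DebertaModel.from_pretrained('microsoft/deberta-base')
inputs = tokenizer('Hello world', return_tensors='pt')
with torch.no_grad():
    hidden_states = model(**inputs).last_hidden_state
print(hidden_states.shape)  # torch.Size([1, seq_len, 768])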
'''simple docstring'''
import argparse
import struct
import unittest
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : int , _A : bytes ) -> None:
__magic_name__ : Dict = data
# Initialize hash values
__magic_name__ : Optional[int] = [
0X6_A_0_9_E_6_6_7,
0XB_B_6_7_A_E_8_5,
0X3_C_6_E_F_3_7_2,
0XA_5_4_F_F_5_3_A,
0X5_1_0_E_5_2_7_F,
0X9_B_0_5_6_8_8_C,
0X1_F_8_3_D_9_A_B,
0X5_B_E_0_C_D_1_9,
]
# Initialize round constants
__magic_name__ : Dict = [
0X4_2_8_A_2_F_9_8,
0X7_1_3_7_4_4_9_1,
0XB_5_C_0_F_B_C_F,
0XE_9_B_5_D_B_A_5,
0X3_9_5_6_C_2_5_B,
0X5_9_F_1_1_1_F_1,
0X9_2_3_F_8_2_A_4,
0XA_B_1_C_5_E_D_5,
0XD_8_0_7_A_A_9_8,
0X1_2_8_3_5_B_0_1,
0X2_4_3_1_8_5_B_E,
0X5_5_0_C_7_D_C_3,
0X7_2_B_E_5_D_7_4,
0X8_0_D_E_B_1_F_E,
0X9_B_D_C_0_6_A_7,
0XC_1_9_B_F_1_7_4,
0XE_4_9_B_6_9_C_1,
0XE_F_B_E_4_7_8_6,
0X0_F_C_1_9_D_C_6,
0X2_4_0_C_A_1_C_C,
0X2_D_E_9_2_C_6_F,
0X4_A_7_4_8_4_A_A,
0X5_C_B_0_A_9_D_C,
0X7_6_F_9_8_8_D_A,
0X9_8_3_E_5_1_5_2,
0XA_8_3_1_C_6_6_D,
0XB_0_0_3_2_7_C_8,
0XB_F_5_9_7_F_C_7,
0XC_6_E_0_0_B_F_3,
0XD_5_A_7_9_1_4_7,
0X0_6_C_A_6_3_5_1,
0X1_4_2_9_2_9_6_7,
0X2_7_B_7_0_A_8_5,
0X2_E_1_B_2_1_3_8,
0X4_D_2_C_6_D_F_C,
0X5_3_3_8_0_D_1_3,
0X6_5_0_A_7_3_5_4,
0X7_6_6_A_0_A_B_B,
0X8_1_C_2_C_9_2_E,
0X9_2_7_2_2_C_8_5,
0XA_2_B_F_E_8_A_1,
0XA_8_1_A_6_6_4_B,
0XC_2_4_B_8_B_7_0,
0XC_7_6_C_5_1_A_3,
0XD_1_9_2_E_8_1_9,
0XD_6_9_9_0_6_2_4,
0XF_4_0_E_3_5_8_5,
0X1_0_6_A_A_0_7_0,
0X1_9_A_4_C_1_1_6,
0X1_E_3_7_6_C_0_8,
0X2_7_4_8_7_7_4_C,
0X3_4_B_0_B_C_B_5,
0X3_9_1_C_0_C_B_3,
0X4_E_D_8_A_A_4_A,
0X5_B_9_C_C_A_4_F,
0X6_8_2_E_6_F_F_3,
0X7_4_8_F_8_2_E_E,
0X7_8_A_5_6_3_6_F,
0X8_4_C_8_7_8_1_4,
0X8_C_C_7_0_2_0_8,
0X9_0_B_E_F_F_F_A,
0XA_4_5_0_6_C_E_B,
0XB_E_F_9_A_3_F_7,
0XC_6_7_1_7_8_F_2,
]
__magic_name__ : str = self.preprocessing(self.data )
self.final_hash()
@staticmethod
def __lowerCAmelCase ( _A : bytes ) -> bytes:
__magic_name__ : int = b'\x80' + (b'\x00' * (63 - (len(_A ) + 8) % 64))
__magic_name__ : Optional[int] = struct.pack('>Q' , (len(_A ) * 8) )
return data + padding + big_endian_integer
def __lowerCAmelCase ( self : Optional[int] ) -> None:
# Convert into blocks of 64 bytes
__magic_name__ : str = [
self.preprocessed_data[x : x + 64]
for x in range(0 , len(self.preprocessed_data ) , 64 )
]
for block in self.blocks:
# Convert the given block into a list of 4 byte integers
__magic_name__ : Optional[int] = list(struct.unpack('>16L' , _A ) )
# add 48 0-ed integers
words += [0] * 48
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = self.hashes
for index in range(0 , 64 ):
if index > 15:
# modify the zero-ed indexes at the end of the array
__magic_name__ : Optional[int] = (
self.ror(words[index - 15] , 7 )
^ self.ror(words[index - 15] , 18 )
^ (words[index - 15] >> 3)
)
__magic_name__ : Any = (
self.ror(words[index - 2] , 17 )
^ self.ror(words[index - 2] , 19 )
^ (words[index - 2] >> 10)
)
__magic_name__ : List[Any] = (
words[index - 16] + sa + words[index - 7] + sa
) % 0X1_0_0_0_0_0_0_0_0
# Compression
__magic_name__ : List[str] = self.ror(_A , 6 ) ^ self.ror(_A , 11 ) ^ self.ror(_A , 25 )
__magic_name__ : Tuple = (e & f) ^ ((~e & 0XF_F_F_F_F_F_F_F) & g)
__magic_name__ : Optional[int] = (
h + sa + ch + self.round_constants[index] + words[index]
) % 0X1_0_0_0_0_0_0_0_0
__magic_name__ : Tuple = self.ror(_A , 2 ) ^ self.ror(_A , 13 ) ^ self.ror(_A , 22 )
__magic_name__ : Optional[Any] = (a & b) ^ (a & c) ^ (b & c)
__magic_name__ : Optional[int] = (sa + maj) % 0X1_0_0_0_0_0_0_0_0
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : str = (
g,
f,
e,
((d + tempa) % 0X1_0_0_0_0_0_0_0_0),
c,
b,
a,
((tempa + tempa) % 0X1_0_0_0_0_0_0_0_0),
)
__magic_name__ : Tuple = [a, b, c, d, e, f, g, h]
# Modify final values
__magic_name__ : List[str] = [
((element + mutated_hash_values[index]) % 0X1_0_0_0_0_0_0_0_0)
for index, element in enumerate(self.hashes )
]
__magic_name__ : List[str] = ''.join([hex(_A )[2:].zfill(8 ) for value in self.hashes] )
def __lowerCAmelCase ( self : int , _A : int , _A : int ) -> int:
return 0XF_F_F_F_F_F_F_F & (value << (32 - rotations)) | (value >> rotations)
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> None:
import hashlib
__magic_name__ : int = bytes('Test String' , 'utf-8' )
self.assertEqual(SHAaaa(_A ).hash , hashlib.shaaaa(_A ).hexdigest() )
def lowerCamelCase ( ):
"""simple docstring"""
import doctest
doctest.testmod()
__magic_name__ : Optional[int] = argparse.ArgumentParser()
parser.add_argument(
'-s' , '--string' , dest='input_string' , default='Hello World!! Welcome to Cryptography' , help='Hash the string' , )
parser.add_argument(
'-f' , '--file' , dest='input_file' , help='Hash contents of a file' )
__magic_name__ : Tuple = parser.parse_args()
__magic_name__ : Optional[Any] = args.input_string
# hash input should be a bytestring
if args.input_file:
with open(args.input_file , 'rb' ) as f:
__magic_name__ : List[Any] = f.read()
else:
__magic_name__ : Dict = bytes(lowerCAmelCase , 'utf-8' )
print(SHAaaa(lowerCAmelCase ).hash )
if __name__ == "__main__":
main()
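# For reference, the standard-library counterpart of the class above is
# hashlib; the canonical NIST test vector for SHA-256 of b'abc' is checked
# below.
import hashlib

assert (
    hashlib.sha256(b'abc').hexdigest()
    == 'ba7816bf8f01cfea414140de5dae2223b00361a396177a9cb410ff61f20015ad'
)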
'''simple docstring'''
class _lowerCamelCase : # Public class to implement a graph
'''simple docstring'''
def __init__( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None:
__magic_name__ : Tuple = row
__magic_name__ : str = col
__magic_name__ : Optional[Any] = graph
def __lowerCAmelCase ( self : Any , _A : int , _A : int , _A : list[list[bool]] ) -> bool:
return (
0 <= i < self.ROW
and 0 <= j < self.COL
and not visited[i][j]
and self.graph[i][j]
)
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : int , _A : list[list[bool]] ) -> None:
# Check all 8 cells surrounding element (i, j)
__magic_name__ : List[str] = [-1, -1, -1, 0, 0, 1, 1, 1] # row offsets of the 8 neighbours
__magic_name__ : List[str] = [-1, 0, 1, -1, 1, -1, 0, 1] # column offsets of the 8 neighbours
__magic_name__ : Optional[int] = True # Make those cells visited
for k in range(8 ):
if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , _A ):
self.diffs(i + row_nbr[k] , j + col_nbr[k] , _A )
def __lowerCAmelCase ( self : int ) -> int: # And finally, count all islands.
__magic_name__ : List[str] = [[False for j in range(self.COL )] for i in range(self.ROW )]
__magic_name__ : Any = 0
for i in range(self.ROW ):
for j in range(self.COL ):
if visited[i][j] is False and self.graph[i][j] == 1:
self.diffs(_A , _A , _A )
count += 1
return count | 331 | 1 |
'''simple docstring'''
from typing import List, Optional, Union
import numpy as np
import PIL.Image
from ...image_processing_utils import BaseImageProcessor, BatchFeature
from ...image_transforms import rescale, resize, to_channel_dimension_format
from ...image_utils import (
ChannelDimension,
PILImageResampling,
get_image_size,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, logging
lowerCAmelCase :Tuple = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[Any] = ["""pixel_values"""]
def __init__( self : int , _A : bool = True , _A : int = 32 , _A : List[Any]=PILImageResampling.BILINEAR , _A : bool = True , **_A : List[str] , ) -> None:
__magic_name__ : int = do_resize
__magic_name__ : List[str] = do_rescale
__magic_name__ : str = size_divisor
__magic_name__ : Optional[Any] = resample
super().__init__(**_A )
def __lowerCAmelCase ( self : Any , _A : np.ndarray , _A : int , _A : Optional[int] , _A : Optional[ChannelDimension] = None , **_A : Union[str, Any] ) -> np.ndarray:
__magic_name__ , __magic_name__ : str = get_image_size(_A )
# Rounds the height and width down to the closest multiple of size_divisor
__magic_name__ : Optional[int] = height // size_divisor * size_divisor
__magic_name__ : Union[str, Any] = width // size_divisor * size_divisor
__magic_name__ : Union[str, Any] = resize(_A , (new_h, new_w) , resample=_A , data_format=_A , **_A )
return image
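# rescale multiplies every pixel by `scale`; the pipeline below uses 1 / 255
# to map uint8 images into [0, 1].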
def __lowerCAmelCase ( self : Tuple , _A : np.ndarray , _A : float , _A : Optional[ChannelDimension] = None , **_A : int ) -> np.ndarray:
return rescale(image=_A , scale=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Union["PIL.Image.Image", TensorType, List["PIL.Image.Image"], List[TensorType]] , _A : Optional[bool] = None , _A : Optional[int] = None , _A : Any=None , _A : Optional[bool] = None , _A : Optional[Union[TensorType, str]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> BatchFeature:
__magic_name__ : int = do_resize if do_resize is not None else self.do_resize
__magic_name__ : Union[str, Any] = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : List[Any] = size_divisor if size_divisor is not None else self.size_divisor
__magic_name__ : List[Any] = resample if resample is not None else self.resample
if do_resize and size_divisor is None:
raise ValueError('size_divisor is required for resizing' )
__magic_name__ : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError('Invalid image(s)' )
# All transformations expect numpy arrays.
__magic_name__ : List[str] = [to_numpy_array(_A ) for img in images]
if do_resize:
__magic_name__ : Optional[int] = [self.resize(_A , size_divisor=_A , resample=_A ) for image in images]
if do_rescale:
__magic_name__ : List[Any] = [self.rescale(_A , scale=1 / 255 ) for image in images]
__magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
__magic_name__ : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A ) | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
lowerCAmelCase :Tuple = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
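# Tokenizer classes are only registered in the import structure when their
# optional dependencies (sentencepiece / tokenizers) are importable.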
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
lowerCAmelCase :str = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
'''simple docstring'''
import gzip
import hashlib
import json
import multiprocessing
import os
import re
import shutil
import time
from pathlib import Path
import numpy as np
from arguments import PreprocessingArguments
from datasets import load_dataset
from minhash_deduplication import deduplicate_dataset
from transformers import AutoTokenizer, HfArgumentParser
lowerCAmelCase :Optional[Any] = re.compile(r'''\s+''')
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return {"hash": hashlib.mda(re.sub(lowerCAmelCase , '' , example['content'] ).encode('utf-8' ) ).hexdigest()}
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
__magic_name__ : Optional[int] = [len(lowerCAmelCase ) for line in example['content'].splitlines()]
return {"line_mean": np.mean(lowerCAmelCase ), "line_max": max(lowerCAmelCase )}
def lowerCamelCase ( lowerCAmelCase : Optional[int] ):
"""simple docstring"""
__magic_name__ : str = np.mean([c.isalnum() for c in example['content']] )
return {"alpha_frac": alpha_frac}
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] , lowerCAmelCase : str ):
"""simple docstring"""
if example["hash"] in uniques:
uniques.remove(example['hash'] )
return True
else:
return False
def lowerCamelCase ( lowerCAmelCase : Optional[Any] , lowerCAmelCase : List[Any]=5 ):
"""simple docstring"""
__magic_name__ : Dict = ['auto-generated', 'autogenerated', 'automatically generated']
__magic_name__ : Optional[Any] = example['content'].splitlines()
for _, line in zip(range(lowerCAmelCase ) , lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"autogenerated": True}
else:
return {"autogenerated": False}
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Any=5 , lowerCAmelCase : Union[str, Any]=0.05 ):
"""simple docstring"""
__magic_name__ : Dict = ['unit tests', 'test file', 'configuration file']
__magic_name__ : Union[str, Any] = example['content'].splitlines()
__magic_name__ : List[Any] = 0
__magic_name__ : str = 0
# first test: a keyword match within the first few lines
for _, line in zip(range(lowerCAmelCase ) , lowerCAmelCase ):
for keyword in keywords:
if keyword in line.lower():
return {"config_or_test": True}
# second test: keyword frequency above a threshold proportional to file length
__magic_name__ : Tuple = example['content'].count('\n' )
__magic_name__ : Any = int(coeff * nlines )
for line in lines:
count_config += line.lower().count('config' )
count_test += line.lower().count('test' )
if count_config > threshold or count_test > threshold:
return {"config_or_test": True}
return {"config_or_test": False}
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : List[Any] = ['def ', 'class ', 'for ', 'while ']
__magic_name__ : Dict = example['content'].splitlines()
for line in lines:
for keyword in keywords:
if keyword in line.lower():
return {"has_no_keywords": False}
return {"has_no_keywords": True}
def lowerCamelCase ( lowerCAmelCase : Tuple , lowerCAmelCase : int=4 ):
"""simple docstring"""
__magic_name__ : Any = example['content'].splitlines()
__magic_name__ : List[str] = 0
for line in lines:
counter += line.lower().count('=' )
if counter > minimum:
return {"has_few_assignments": False}
return {"has_few_assignments": True}
def lowerCamelCase ( lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
__magic_name__ : Tuple = tokenizer(example['content'] , truncation=lowerCAmelCase )['input_ids']
__magic_name__ : int = len(example['content'] ) / len(lowerCAmelCase )
return {"ratio": ratio}
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
__magic_name__ : Union[str, Any] = {}
results.update(get_hash(lowerCAmelCase ) )
results.update(line_stats(lowerCAmelCase ) )
results.update(alpha_stats(lowerCAmelCase ) )
results.update(char_token_ratio(lowerCAmelCase ) )
results.update(is_autogenerated(lowerCAmelCase ) )
results.update(is_config_or_test(lowerCAmelCase ) )
results.update(has_no_keywords(lowerCAmelCase ) )
results.update(has_few_assignments(lowerCAmelCase ) )
return results
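# An example is kept only if it is unique and passes every heuristic above;
# config/test files and keyword-free files survive with probability
# 1 - filter_proba.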
def lowerCamelCase ( lowerCAmelCase : int , lowerCAmelCase : str , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
if not check_uniques(lowerCAmelCase , lowerCAmelCase ):
return False
elif example["autogenerated"]:
return False
elif example["line_max"] > args.line_max:
return False
elif example["line_mean"] > args.line_mean:
return False
elif example["alpha_frac"] < args.alpha_frac:
return False
elif example["ratio"] < args.min_token_ratio:
return False
elif example["config_or_test"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_no_keywords"] and np.random.rand() <= args.filter_proba:
return False
elif example["has_few_assignments"]:
return False
else:
return True
def lowerCamelCase ( lowerCAmelCase : Dict ):
"""simple docstring"""
with open(lowerCAmelCase , 'rb' ) as f_in:
with gzip.open(str(lowerCAmelCase ) + '.gz' , 'wb' , compresslevel=6 ) as f_out:
shutil.copyfileobj(lowerCAmelCase , lowerCAmelCase )
os.unlink(lowerCAmelCase )
# Settings
lowerCAmelCase :Any = HfArgumentParser(PreprocessingArguments)
lowerCAmelCase :List[Any] = parser.parse_args()
if args.num_workers is None:
lowerCAmelCase :Union[str, Any] = multiprocessing.cpu_count()
lowerCAmelCase :int = AutoTokenizer.from_pretrained(args.tokenizer_dir)
# Load dataset
lowerCAmelCase :Any = time.time()
lowerCAmelCase :Union[str, Any] = load_dataset(args.dataset_name, split='''train''')
print(F'Time to load dataset: {time.time()-t_start:.2f}')
# Run preprocessing
lowerCAmelCase :Dict = time.time()
lowerCAmelCase :Optional[int] = ds.map(preprocess, num_proc=args.num_workers)
print(F'Time to preprocess dataset: {time.time()-t_start:.2f}')
# Deduplicate hashes
lowerCAmelCase :Optional[int] = set(ds.unique('''hash'''))
lowerCAmelCase :List[str] = len(uniques) / len(ds)
print(F'Fraction of duplicates: {1-frac:.2%}')
# Deduplicate data and apply heuristics
lowerCAmelCase :Optional[Any] = time.time()
lowerCAmelCase :Any = ds.filter(filter, fn_kwargs={'''uniques''': uniques, '''args''': args})
print(F'Time to filter dataset: {time.time()-t_start:.2f}')
print(F'Size of filtered dataset: {len(ds_filter)}')
# Deduplicate with minhash and jaccard similarity
if args.near_deduplication:
lowerCAmelCase :Any = time.time()
lowerCAmelCase , lowerCAmelCase :Union[str, Any] = deduplicate_dataset(ds_filter, args.jaccard_threshold)
print(F'Time to deduplicate dataset: {time.time()-t_start:.2f}')
print(F'Size of deduplicate dataset: {len(ds_filter)}')
# Save data in batches of samples_per_file
lowerCAmelCase :Optional[int] = Path(args.output_dir)
output_dir.mkdir(exist_ok=True)
# save duplicate_clusters in the output_dir as artifacts
# not sure it is the right place the save it
if args.near_deduplication:
with open(output_dir / '''duplicate_clusters.json''', '''w''') as f:
json.dump(duplicate_clusters, f)
lowerCAmelCase :Tuple = output_dir / '''data'''
data_dir.mkdir(exist_ok=True)
lowerCAmelCase :int = time.time()
for file_number, index in enumerate(range(0, len(ds_filter), args.samples_per_file)):
lowerCAmelCase :List[Any] = str(data_dir / F'file-{file_number+1:012}.json')
lowerCAmelCase :Optional[int] = min(len(ds_filter), index + args.samples_per_file)
ds_filter.select(list(range(index, end_index))).to_json(file_path)
compress_file(file_path)
print(F'Time to save dataset: {time.time()-t_start:.2f}') | 331 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def lowerCamelCase ( lowerCAmelCase : int = 200_0000 ):
"""simple docstring"""
__magic_name__ : list[int] = [0]
__magic_name__ : int
for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
triangle_numbers.append(triangle_numbers[-1] + idx )
# the closest product of two triangle numbers found so far
__magic_name__ : int = 0
# the area corresponding to the grid that gives the product closest to target
__magic_name__ : int = 0
# an estimate of b, using the quadratic formula
__magic_name__ : float
# the largest integer not exceeding b_estimate
__magic_name__ : int
# the smallest integer greater than or equal to b_estimate
__magic_name__ : int
# the triangle number corresponding to b_floor
__magic_name__ : int
# the triangle number corresponding to b_ceil
__magic_name__ : int
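# For each triangle number triangle_a, estimate the index b whose triangle
# number brings the product closest to target, then test both floor(b) and
# ceil(b).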
for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
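# positive root of b * (b + 1) / 2 = target / triangle_a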
__magic_name__ : Dict = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
__magic_name__ : List[Any] = floor(lowerCAmelCase )
__magic_name__ : Dict = ceil(lowerCAmelCase )
__magic_name__ : Any = triangle_numbers[b_floor]
__magic_name__ : Optional[int] = triangle_numbers[b_ceil]
if abs(target - triangle_b_first_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : Any = triangle_b_first_guess * triangle_a
__magic_name__ : Any = idx_a * b_floor
if abs(target - triangle_b_second_guess * triangle_a ) < abs(
target - best_product ):
__magic_name__ : List[str] = triangle_b_second_guess * triangle_a
__magic_name__ : Optional[int] = idx_a * b_ceil
return area
if __name__ == "__main__":
print(F'{solution() = }') | 331 | 1 |
'''simple docstring'''
import functools
import operator
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase :Dict = logging.get_logger(__name__)
lowerCAmelCase :Optional[int] = {
'''microsoft/wavlm-base''': '''https://huggingface.co/microsoft/wavlm-base/resolve/main/config.json''',
# See all WavLM models at https://huggingface.co/models?filter=wavlm
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[Any] = """wavlm"""
def __init__( self : Optional[Any] , _A : Any=32 , _A : Dict=768 , _A : int=12 , _A : Union[str, Any]=12 , _A : Union[str, Any]=3072 , _A : List[str]="gelu" , _A : Optional[int]=0.1 , _A : Dict=0.1 , _A : Union[str, Any]=0.1 , _A : Any=0.0 , _A : Union[str, Any]=0.1 , _A : Optional[Any]=0.1 , _A : int=0.02 , _A : Optional[Any]=1E-5 , _A : Any="group" , _A : List[str]="gelu" , _A : int=(512, 512, 512, 512, 512, 512, 512) , _A : Any=(5, 2, 2, 2, 2, 2, 2) , _A : Optional[Any]=(10, 3, 3, 3, 3, 2, 2) , _A : Dict=False , _A : Any=128 , _A : Optional[Any]=16 , _A : int=320 , _A : List[Any]=800 , _A : str=False , _A : str=True , _A : Union[str, Any]=0.05 , _A : int=10 , _A : int=2 , _A : str=0.0 , _A : Tuple=10 , _A : Tuple=320 , _A : Union[str, Any]=2 , _A : List[str]=0.1 , _A : str=100 , _A : int=256 , _A : int=256 , _A : int=0.1 , _A : str="mean" , _A : List[Any]=False , _A : Any=False , _A : List[str]=256 , _A : Any=(512, 512, 512, 512, 1500) , _A : Optional[Any]=(5, 3, 3, 1, 1) , _A : Any=(1, 2, 3, 1, 1) , _A : str=512 , _A : List[str]=80 , _A : str=0 , _A : List[str]=1 , _A : Optional[Any]=2 , _A : str=False , _A : Dict=3 , _A : int=2 , _A : Union[str, Any]=3 , _A : Dict=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__(**_A , pad_token_id=_A , bos_token_id=_A , eos_token_id=_A )
__magic_name__ : Union[str, Any] = hidden_size
__magic_name__ : Any = feat_extract_norm
__magic_name__ : Tuple = feat_extract_activation
__magic_name__ : int = list(_A )
__magic_name__ : List[str] = list(_A )
__magic_name__ : Any = list(_A )
__magic_name__ : int = conv_bias
__magic_name__ : int = num_buckets
__magic_name__ : Tuple = max_bucket_distance
__magic_name__ : str = num_conv_pos_embeddings
__magic_name__ : Union[str, Any] = num_conv_pos_embedding_groups
__magic_name__ : Dict = len(self.conv_dim )
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Tuple = num_attention_heads
__magic_name__ : str = hidden_dropout
__magic_name__ : Union[str, Any] = attention_dropout
__magic_name__ : int = activation_dropout
__magic_name__ : str = feat_proj_dropout
__magic_name__ : Union[str, Any] = final_dropout
__magic_name__ : Union[str, Any] = layerdrop
__magic_name__ : Dict = layer_norm_eps
__magic_name__ : Optional[Any] = initializer_range
__magic_name__ : str = num_ctc_classes
__magic_name__ : str = vocab_size
__magic_name__ : Dict = do_stable_layer_norm
__magic_name__ : Optional[Any] = use_weighted_layer_sum
__magic_name__ : Dict = classifier_proj_size
if (
(len(self.conv_stride ) != self.num_feat_extract_layers)
or (len(self.conv_kernel ) != self.num_feat_extract_layers)
or (len(self.conv_dim ) != self.num_feat_extract_layers)
):
raise ValueError(
'Configuration for convolutional layers is incorrect. It is required that `len(config.conv_dim)` =='
' `len(config.conv_stride)` == `len(config.conv_kernel)`, but is `len(config.conv_dim) ='
F' {len(self.conv_dim )}`, `len(config.conv_stride) = {len(self.conv_stride )}`,'
F' `len(config.conv_kernel) = {len(self.conv_kernel )}`.' )
# fine-tuning config parameters for SpecAugment: https://arxiv.org/abs/1904.08779
__magic_name__ : Optional[int] = apply_spec_augment
__magic_name__ : Optional[Any] = mask_time_prob
__magic_name__ : Optional[int] = mask_time_length
__magic_name__ : int = mask_time_min_masks
__magic_name__ : Optional[Any] = mask_feature_prob
__magic_name__ : List[str] = mask_feature_length
# parameters for pretraining with codevector quantized representations
__magic_name__ : Union[str, Any] = num_codevectors_per_group
__magic_name__ : Optional[int] = num_codevector_groups
__magic_name__ : Dict = contrastive_logits_temperature
__magic_name__ : List[Any] = num_negatives
__magic_name__ : Any = codevector_dim
__magic_name__ : Union[str, Any] = proj_codevector_dim
__magic_name__ : str = diversity_loss_weight
# ctc loss
__magic_name__ : int = ctc_loss_reduction
__magic_name__ : Union[str, Any] = ctc_zero_infinity
# adapter
__magic_name__ : Tuple = add_adapter
__magic_name__ : Any = adapter_kernel_size
__magic_name__ : List[str] = adapter_stride
__magic_name__ : List[Any] = num_adapter_layers
__magic_name__ : List[Any] = output_hidden_size or hidden_size
# SequenceClassification-specific parameter. Feel free to ignore for other classes.
__magic_name__ : str = classifier_proj_size
# XVector-specific parameters. Feel free to ignore for other classes.
__magic_name__ : Any = list(_A )
__magic_name__ : Optional[Any] = list(_A )
__magic_name__ : Optional[int] = list(_A )
__magic_name__ : Union[str, Any] = xvector_output_dim
@property
def __lowerCAmelCase ( self : int ) -> Optional[Any]:
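# Overall downsampling factor of the convolutional feature encoder
# (the product of all convolution strides).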
return functools.reduce(operator.mul , self.conv_stride , 1 ) | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
lowerCAmelCase :str = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[Any] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Dict = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Tuple = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :int = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Any = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 331 | 1 |
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int:
__magic_name__ : str = parent
__magic_name__ : List[Any] = 13
__magic_name__ : Union[str, Any] = 7
__magic_name__ : Tuple = True
__magic_name__ : Dict = True
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = True
__magic_name__ : int = 99
__magic_name__ : List[str] = 384
__magic_name__ : Optional[int] = 2
__magic_name__ : List[Any] = 4
__magic_name__ : int = 37
__magic_name__ : Union[str, Any] = 'gelu'
__magic_name__ : Optional[int] = 0.1
__magic_name__ : str = 0.1
__magic_name__ : Optional[Any] = 512
__magic_name__ : Any = 16
__magic_name__ : Union[str, Any] = 2
__magic_name__ : Any = 0.02
__magic_name__ : List[str] = 3
__magic_name__ : Tuple = 4
__magic_name__ : List[Any] = 128
__magic_name__ : Optional[Any] = 2
__magic_name__ : List[str] = 9
__magic_name__ : str = 1
__magic_name__ : List[str] = None
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Optional[Any] = None
if self.use_input_mask:
__magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
if self.use_token_type_ids:
__magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
__magic_name__ : int = None
if self.use_labels:
__magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Optional[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A : Tuple , _A : int , _A : Union[str, Any] ) -> Any:
__magic_name__ : Dict = TFConvBertModel(config=_A )
__magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__magic_name__ : Any = [input_ids, input_mask]
__magic_name__ : Tuple = model(_A )
__magic_name__ : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]:
__magic_name__ : Dict = TFConvBertForMaskedLM(config=_A )
__magic_name__ : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple:
__magic_name__ : Any = self.num_labels
__magic_name__ : str = TFConvBertForSequenceClassification(config=_A )
__magic_name__ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = self.num_choices
__magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
__magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
__magic_name__ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int:
__magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A )
__magic_name__ : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[str] = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : str = config_and_inputs
__magic_name__ : Dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[str] = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Tuple = False
A_ : Any = False
A_ : List[Any] = False
def __lowerCAmelCase ( self : List[Any] ) -> int:
__magic_name__ : Optional[Any] = TFConvBertModelTester(self )
__magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : int ) -> Any:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
__magic_name__ : Any = True
if hasattr(_A , 'use_cache' ):
__magic_name__ : List[Any] = True
__magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A )
for model_class in self.all_model_classes:
__magic_name__ : List[str] = self._prepare_for_class(_A , _A )
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Tuple = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
__magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' )
__magic_name__ : Optional[int] = tf.keras.models.load_model(_A )
__magic_name__ : Optional[Any] = model(_A )
if self.is_encoder_decoder:
__magic_name__ : Optional[int] = outputs['encoder_hidden_states']
__magic_name__ : Tuple = outputs['encoder_attentions']
else:
__magic_name__ : Union[str, Any] = outputs['hidden_states']
__magic_name__ : Optional[Any] = outputs['attentions']
self.assertEqual(len(_A ) , _A )
__magic_name__ : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_A )
def __lowerCAmelCase ( self : List[str] ) -> Any:
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : str = True
__magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A )
__magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A )
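# ConvBERT splits heads between self-attention and span-based convolution,
# so the helpers below expect num_attention_heads / 2 attention heads.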
def check_decoder_attentions_output(_A : List[Any] ):
__magic_name__ : Tuple = len(_A )
self.assertEqual(out_len % 2 , 0 )
__magic_name__ : Any = outputs.decoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_A : int ):
__magic_name__ : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = False
__magic_name__ : List[str] = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
__magic_name__ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
__magic_name__ : Any = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[int] = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
# Check that attentions are always returned last and that the output ordering is unchanged
__magic_name__ : str = True
__magic_name__ : str = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : str = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) )
self.assertEqual(model.config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : int ) -> int:
__magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : Tuple = model(_A )[0]
__magic_name__ : str = [1, 6, 768]
self.assertEqual(output.shape , _A )
__magic_name__ : Tuple = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 ) | 331 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = ["""pixel_values"""]
def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None:
super().__init__(**_A )
__magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384}
__magic_name__ : Dict = get_size_dict(_A , default_to_square=_A )
__magic_name__ : List[Any] = do_resize
__magic_name__ : str = size
# Default value set here for backwards compatibility where the value in config is None
__magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256
__magic_name__ : int = resample
__magic_name__ : List[str] = do_rescale
__magic_name__ : List[Any] = rescale_factor
__magic_name__ : str = do_normalize
__magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
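# Below 384 pixels the image is first resized so its short side equals
# shortest_edge / crop_pct, then centre-cropped to a shortest_edge square;
# at 384 or above it is warped straight to a square.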
__magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
__magic_name__ : Dict = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__magic_name__ : Dict = int(shortest_edge / crop_pct )
__magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
__magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int:
return rescale(_A , scale=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : ImageInput , _A : bool = None , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = None , _A : bool = None , _A : float = None , _A : bool = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[str, TensorType]] = None , _A : ChannelDimension = ChannelDimension.FIRST , **_A : str , ) -> PIL.Image.Image:
__magic_name__ : int = do_resize if do_resize is not None else self.do_resize
__magic_name__ : Optional[int] = crop_pct if crop_pct is not None else self.crop_pct
__magic_name__ : Optional[Any] = resample if resample is not None else self.resample
__magic_name__ : Optional[Any] = do_rescale if do_rescale is not None else self.do_rescale
__magic_name__ : List[str] = rescale_factor if rescale_factor is not None else self.rescale_factor
__magic_name__ : str = do_normalize if do_normalize is not None else self.do_normalize
__magic_name__ : str = image_mean if image_mean is not None else self.image_mean
__magic_name__ : Dict = image_std if image_std is not None else self.image_std
__magic_name__ : Dict = size if size is not None else self.size
__magic_name__ : List[Any] = get_size_dict(_A , default_to_square=_A )
__magic_name__ : int = make_list_of_images(_A )
if not valid_images(_A ):
raise ValueError(
'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
'torch.Tensor, tf.Tensor or jax.ndarray.' )
if do_resize and (size is None or resample is None):
raise ValueError('Size and resample must be specified if do_resize is True.' )
if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
raise ValueError('crop_pct must be specified if size < 384.' )
if do_rescale and rescale_factor is None:
raise ValueError('Rescale factor must be specified if do_rescale is True.' )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError('Image mean and std must be specified if do_normalize is True.' )
# All transformations expect numpy arrays.
__magic_name__ : Optional[Any] = [to_numpy_array(_A ) for image in images]
if do_resize:
__magic_name__ : List[str] = [self.resize(image=_A , size=_A , crop_pct=_A , resample=_A ) for image in images]
if do_rescale:
__magic_name__ : Tuple = [self.rescale(image=_A , scale=_A ) for image in images]
if do_normalize:
__magic_name__ : int = [self.normalize(image=_A , mean=_A , std=_A ) for image in images]
__magic_name__ : Tuple = [to_channel_dimension_format(_A , _A ) for image in images]
__magic_name__ : Union[str, Any] = {'pixel_values': images}
return BatchFeature(data=_A , tensor_type=_A ) | 331 | 1 |
'''simple docstring'''
import unittest
from transformers import AlbertConfig, is_torch_available
from transformers.models.auto import get_values
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
MODEL_FOR_PRETRAINING_MAPPING,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
)
from transformers.models.albert.modeling_albert import ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Tuple , _A : Dict , _A : str=13 , _A : List[Any]=7 , _A : Dict=True , _A : Any=True , _A : int=True , _A : Any=True , _A : List[Any]=99 , _A : str=16 , _A : Optional[Any]=36 , _A : str=6 , _A : List[Any]=6 , _A : Tuple=6 , _A : Union[str, Any]=37 , _A : Optional[Any]="gelu" , _A : List[Any]=0.1 , _A : int=0.1 , _A : Optional[Any]=512 , _A : Optional[int]=16 , _A : Optional[Any]=2 , _A : Optional[Any]=0.02 , _A : Dict=3 , _A : Optional[int]=4 , _A : Any=None , ) -> str:
__magic_name__ : List[str] = parent
__magic_name__ : List[Any] = batch_size
__magic_name__ : Tuple = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Union[str, Any] = use_input_mask
__magic_name__ : List[str] = use_token_type_ids
__magic_name__ : List[str] = use_labels
__magic_name__ : Tuple = vocab_size
__magic_name__ : Union[str, Any] = embedding_size
__magic_name__ : Union[str, Any] = hidden_size
__magic_name__ : Any = num_hidden_layers
__magic_name__ : Tuple = num_hidden_groups
__magic_name__ : Any = num_attention_heads
__magic_name__ : Optional[Any] = intermediate_size
__magic_name__ : Dict = hidden_act
__magic_name__ : List[str] = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : int = max_position_embeddings
__magic_name__ : Optional[Any] = type_vocab_size
__magic_name__ : int = type_sequence_label_size
__magic_name__ : str = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : List[Any] = scope
def __lowerCAmelCase ( self : int ) -> Tuple:
__magic_name__ : List[Any] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Any = None
if self.use_input_mask:
__magic_name__ : List[Any] = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : Optional[int] = None
if self.use_token_type_ids:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Optional[int] = None
__magic_name__ : int = None
__magic_name__ : int = None
if self.use_labels:
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Dict = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : int = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
return AlbertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , num_hidden_groups=self.num_hidden_groups , )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Optional[int] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple:
__magic_name__ : List[str] = AlbertModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Union[str, Any] = model(_A , attention_mask=_A , token_type_ids=_A )
__magic_name__ : str = model(_A , token_type_ids=_A )
__magic_name__ : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
self.parent.assertEqual(result.pooler_output.shape , (self.batch_size, self.hidden_size) )
def __lowerCAmelCase ( self : List[Any] , _A : Any , _A : List[Any] , _A : Optional[int] , _A : str , _A : Optional[int] , _A : Tuple , _A : Optional[Any] ) -> List[Any]:
__magic_name__ : int = AlbertForPreTraining(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Tuple = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , sentence_order_label=_A , )
self.parent.assertEqual(result.prediction_logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
self.parent.assertEqual(result.sop_logits.shape , (self.batch_size, config.num_labels) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Dict , _A : Optional[Any] , _A : Dict , _A : List[str] ) -> Any:
__magic_name__ : Optional[Any] = AlbertForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Union[str, Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : Union[str, Any] , _A : str , _A : List[str] , _A : List[Any] , _A : Optional[Any] , _A : int ) -> List[str]:
__magic_name__ : Dict = AlbertForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Dict , _A : Optional[int] , _A : str , _A : Optional[Any] , _A : Any , _A : Dict , _A : Tuple , _A : Optional[Any] ) -> int:
__magic_name__ : Optional[Any] = self.num_labels
__magic_name__ : Tuple = AlbertForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : Dict , _A : Any , _A : int , _A : int , _A : Tuple , _A : str , _A : str ) -> int:
__magic_name__ : Union[str, Any] = self.num_labels
__magic_name__ : List[Any] = AlbertForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Tuple = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : List[Any] , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : Tuple , _A : Tuple , _A : int ) -> List[str]:
__magic_name__ : Dict = self.num_choices
__magic_name__ : int = AlbertForMultipleChoice(config=_A )
model.to(_A )
model.eval()
__magic_name__ : str = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : List[Any] = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : Any = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
__magic_name__ : Tuple = model(
_A , attention_mask=_A , token_type_ids=_A , labels=_A , )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
__magic_name__ : List[str] = self.prepare_config_and_inputs()
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ : str = config_and_inputs
__magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
AlbertModel,
AlbertForPreTraining,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Union[str, Any] = (
{
"""feature-extraction""": AlbertModel,
"""fill-mask""": AlbertForMaskedLM,
"""question-answering""": AlbertForQuestionAnswering,
"""text-classification""": AlbertForSequenceClassification,
"""token-classification""": AlbertForTokenClassification,
"""zero-shot""": AlbertForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : List[str] = True
def __lowerCAmelCase ( self : Dict , _A : Dict , _A : Union[str, Any] , _A : Optional[int]=False ) -> List[str]:
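# For pretraining-style models, add dummy token-level and sentence-order
# labels so the forward pass can compute a loss.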
__magic_name__ : Union[str, Any] = super()._prepare_for_class(_A , _A , return_labels=_A )
if return_labels:
if model_class in get_values(_A ):
__magic_name__ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length) , dtype=torch.long , device=_A )
__magic_name__ : List[Any] = torch.zeros(
self.model_tester.batch_size , dtype=torch.long , device=_A )
return inputs_dict
def __lowerCAmelCase ( self : Union[str, Any] ) -> Tuple:
__magic_name__ : str = AlbertModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : int ) -> List[Any]:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_A )
def __lowerCAmelCase ( self : Any ) -> List[Any]:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
__magic_name__ : Dict = type
self.model_tester.create_and_check_model(*_A )
@slow
def __lowerCAmelCase ( self : List[str] ) -> Union[str, Any]:
for model_name in ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : str = AlbertModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : Tuple ) -> Dict:
__magic_name__ : Tuple = AlbertModel.from_pretrained('albert-base-v2' )
__magic_name__ : Optional[Any] = torch.tensor([[0, 345, 232, 328, 740, 140, 1695, 69, 6078, 1588, 2]] )
__magic_name__ : str = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : str = model(_A , attention_mask=_A )[0]
__magic_name__ : Optional[int] = torch.Size((1, 11, 768) )
self.assertEqual(output.shape , _A )
__magic_name__ : str = torch.tensor(
[[[-0.6513, 1.5035, -0.2766], [-0.6515, 1.5046, -0.2780], [-0.6512, 1.5049, -0.2784]]] )
self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) ) | 331 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Physical constants: the reduced Planck constant ℏ (h-bar) and the speed of
# light c, used in the Casimir force formula below.
lowerCAmelCase :Tuple = 1.0_5_4_5_7_1_8_1_7E-3_4 # unit of ℏ : J * s
lowerCAmelCase :Union[str, Any] = 3E8 # unit of c : m * s^-1
def lowerCamelCase ( lowerCAmelCase : float , lowerCAmelCase : float , lowerCAmelCase : float ):
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
if force == 0:
__magic_name__ : Any = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
240 * (distance) ** 4
)
return {"force": force}
elif area == 0:
__magic_name__ : Optional[int] = (240 * force * (distance) ** 4) / (
REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
)
return {"area": area}
elif distance == 0:
__magic_name__ : Union[str, Any] = (
(REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
) ** (1 / 4)
return {"distance": distance}
raise ValueError('One and only one argument must be 0' )
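# Illustrative usage (hypothetical values): pass 0 for the unknown quantity.
# Here we solve for the force between plates of area 4e-4 m^2 separated by 1e-6 m:
# print(lowerCamelCase(0, 4e-4, 1e-6))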
# Run doctest
if __name__ == "__main__":
import doctest
doctest.testmod() | 331 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import is_tf_available
from transformers.testing_utils import require_sentencepiece, require_tf, require_tokenizers, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import TFCamembertModel
@require_tf
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : List[str] = TFCamembertModel.from_pretrained('jplu/tf-camembert-base' )
__magic_name__ : int = tf.convert_to_tensor(
[[5, 121, 11, 660, 16, 730, 25543, 110, 83, 6]] , dtype=tf.intaa , ) # J'aime le camembert !
__magic_name__ : Dict = model(_A )['last_hidden_state']
__magic_name__ : Optional[Any] = tf.TensorShape((1, 10, 768) )
self.assertEqual(output.shape , _A )
# compare the actual values for a slice.
__magic_name__ : Optional[Any] = tf.convert_to_tensor(
[[[-0.0254, 0.0235, 0.1027], [0.0606, -0.1811, -0.0418], [-0.1561, -0.1127, 0.2687]]] , dtype=tf.floataa , )
# camembert = torch.hub.load('pytorch/fairseq', 'camembert.v0')
# camembert.eval()
# expected_slice = camembert.model.forward(input_ids)[0][:, :3, :3].detach()
self.assertTrue(np.allclose(output[:, :3, :3].numpy() , expected_slice.numpy() , atol=1E-4 ) ) | 331 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sola import PokerHand
lowerCAmelCase :Tuple = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
lowerCAmelCase :List[Any] = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
lowerCAmelCase :str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
lowerCAmelCase :str = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
lowerCAmelCase :Optional[Any] = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
lowerCAmelCase :Union[str, Any] = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
lowerCAmelCase :Tuple = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ , __magic_name__ : Union[str, Any] = randrange(len(lowerCAmelCase ) ), randrange(len(lowerCAmelCase ) )
__magic_name__ : Optional[int] = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
__magic_name__ , __magic_name__ : Optional[int] = SORTED_HANDS[play], SORTED_HANDS[oppo]
return hand, other, expected
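# Note added for clarity (not part of the original tests): because SORTED_HANDS is
# ordered from weakest to strongest, the boolean sum (play >= oppo) + (play > oppo)
# used above is 0, 1 or 2 and indexes straight into ['Loss', 'Tie', 'Win'].
def _demo_expected_outcome(play: int, oppo: int) -> str:
    return ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]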
def lowerCamelCase ( lowerCAmelCase : int = 100 ):
"""simple docstring"""
return (generate_random_hand() for _ in range(lowerCAmelCase ))
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : Tuple , expected : Union[str, Any] ):
"""simple docstring"""
assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : List[Any] , expected : Union[str, Any] ):
"""simple docstring"""
assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , lowerCAmelCase )
def lowerCamelCase ( hand : Any , expected : Optional[Any] , card_values : Tuple ):
"""simple docstring"""
player : Any = PokerHand(hand )
assert player._is_five_high_straight() == expected
assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : Any , expected : str ):
"""simple docstring"""
assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : Dict , expected : Dict ):
"""simple docstring"""
assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , lowerCAmelCase )
def lowerCamelCase ( hand : int , other : str , expected : Tuple ):
"""simple docstring"""
assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def lowerCamelCase ( hand : int , other : Optional[Any] , expected : Any ):
"""simple docstring"""
assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Optional[int] = [PokerHand(lowerCAmelCase ) for hand in SORTED_HANDS]
__magic_name__ : Tuple = poker_hands.copy()
shuffle(lowerCAmelCase )
__magic_name__ : Union[str, Any] = chain(sorted(lowerCAmelCase ) )
for index, hand in enumerate(lowerCAmelCase ):
assert hand == poker_hands[index]
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
pokerhands.sort(reverse=lowerCAmelCase )
assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = PokerHand('2C 4S AS 3D 5C' )
__magic_name__ : Optional[Any] = True
__magic_name__ : Union[str, Any] = [5, 4, 3, 2, 14]
for _ in range(10 ):
assert pokerhand._is_five_high_straight() == expected
assert pokerhand._card_values == expected_card_values
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Dict = 0
__magic_name__ : Dict = os.path.abspath(os.path.dirname(lowerCAmelCase ) )
__magic_name__ : Union[str, Any] = os.path.join(lowerCAmelCase , 'poker_hands.txt' )
with open(lowerCAmelCase ) as file_hand:
for line in file_hand:
__magic_name__ : Optional[int] = line[:14].strip()
__magic_name__ : List[Any] = line[15:].strip()
__magic_name__ , __magic_name__ : Tuple = PokerHand(lowerCAmelCase ), PokerHand(lowerCAmelCase )
__magic_name__ : List[Any] = player.compare_with(lowerCAmelCase )
if output == "Win":
answer += 1
assert answer == 376 | 331 | 1 |
'''simple docstring'''
from __future__ import annotations
from collections.abc import Callable
lowerCAmelCase :Dict = list[list[float | int]]
def lowerCamelCase ( matrix : Matrix , vector : Matrix ):
"""simple docstring"""
__magic_name__ : int = len(matrix )
__magic_name__ : Matrix = [[0 for _ in range(size + 1 )] for _ in range(lowerCAmelCase )]
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
__magic_name__ : float
for row in range(lowerCAmelCase ):
for col in range(lowerCAmelCase ):
__magic_name__ : Tuple = matrix[row][col]
__magic_name__ : Tuple = vector[row][0]
__magic_name__ : Union[str, Any] = 0
__magic_name__ : Union[str, Any] = 0
while row < size and col < size:
# pivoting
__magic_name__ : List[Any] = max((abs(augmented[rowa][col] ), rowa) for rowa in range(lowerCAmelCase , lowerCAmelCase ) )[
1
]
if augmented[pivot_row][col] == 0:
col += 1
continue
else:
__magic_name__ , __magic_name__ : Optional[int] = augmented[pivot_row], augmented[row]
for rowa in range(row + 1 , lowerCAmelCase ):
__magic_name__ : Dict = augmented[rowa][col] / augmented[row][col]
__magic_name__ : int = 0
for cola in range(col + 1 , size + 1 ):
augmented[rowa][cola] -= augmented[row][cola] * ratio
row += 1
col += 1
# back substitution
for col in range(1 , lowerCAmelCase ):
for row in range(lowerCAmelCase ):
__magic_name__ : Tuple = augmented[row][col] / augmented[col][col]
for cola in range(lowerCAmelCase , size + 1 ):
augmented[row][cola] -= augmented[col][cola] * ratio
# round to get rid of numbers like 2.000000000000004
return [
[round(augmented[row][size] / augmented[row][row] , 10 )] for row in range(lowerCAmelCase )
]
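# Illustrative cross-check (not part of the original file): for the 2x2 system
# x + y = 3 and 2x - y = 0 the Gaussian-elimination routine above yields
# [[1.0], [2.0]]; Cramer's rule reproduces the same answer independently.
def _demo_cramer_check() -> tuple[float, float]:
    a, b, c, d = 1.0, 1.0, 2.0, -1.0  # coefficient matrix [[a, b], [c, d]]
    e, f = 3.0, 0.0  # right-hand side
    det = a * d - b * c
    return ((e * d - b * f) / det, (a * f - c * e) / det)  # (1.0, 2.0)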
def lowerCamelCase ( lowerCAmelCase : list[int] ):
"""simple docstring"""
__magic_name__ : int = len(lowerCAmelCase )
__magic_name__ : Matrix = [[0 for _ in range(lowerCAmelCase )] for _ in range(lowerCAmelCase )]
__magic_name__ : Matrix = [[0] for _ in range(lowerCAmelCase )]
__magic_name__ : Matrix
__magic_name__ : int
__magic_name__ : int
__magic_name__ : int
for x_val, y_val in enumerate(lowerCAmelCase ):
for col in range(lowerCAmelCase ):
__magic_name__ : Optional[Any] = (x_val + 1) ** (size - col - 1)
__magic_name__ : Tuple = y_val
__magic_name__ : Optional[Any] = solve(lowerCAmelCase , lowerCAmelCase )
def interpolated_func(lowerCAmelCase : int ) -> int:
return sum(
round(coeffs[x_val][0] ) * (var ** (size - x_val - 1))
for x_val in range(lowerCAmelCase ) )
return interpolated_func
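# Illustrative note (not part of the original file): for data_points = [1, 8, 27],
# i.e. u(n) = n**3 sampled at n = 1, 2, 3, the routine above fits the unique
# quadratic 6*n**2 - 11*n + 6, which matches the cube at all three sample points
# but first diverges at n = 4 -- the "first incorrect term" Problem 101 accumulates.
def _demo_fitted_quadratic() -> list[int]:
    return [6 * n * n - 11 * n + 6 for n in range(1, 5)]  # [1, 8, 27, 58]; 4**3 is 64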
def lowerCamelCase ( variable : int ):
"""simple docstring"""
return (
1
- variable
+ variable**2
- variable**3
+ variable**4
- variable**5
+ variable**6
- variable**7
+ variable**8
- variable**9
+ variable**10
)
def lowerCamelCase ( func : Callable[[int], int] = question_function , order : int = 10 ):
"""simple docstring"""
__magic_name__ : list[int] = [func(x_val ) for x_val in range(1 , order + 1 )]
__magic_name__ : list[Callable[[int], int]] = [
interpolate(data_points[:max_coeff] ) for max_coeff in range(1 , order + 1 )
]
__magic_name__ : int = 0
__magic_name__ : Callable[[int], int]
__magic_name__ : int
for poly in polynomials:
x_val = 1
while func(x_val ) == poly(x_val ):
x_val += 1
ret += poly(x_val )
return ret
if __name__ == "__main__":
print(F'{solution() = }') | 331 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
lowerCAmelCase :Union[str, Any] = {
'''configuration_vision_encoder_decoder''': ['''VisionEncoderDecoderConfig''', '''VisionEncoderDecoderOnnxConfig''']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :str = ['''VisionEncoderDecoderModel''']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Optional[int] = ['''TFVisionEncoderDecoderModel''']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
lowerCAmelCase :Union[str, Any] = ['''FlaxVisionEncoderDecoderModel''']
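# Note added for clarity (not part of the original file): the guarded blocks above
# register each model class in _import_structure only when its backend (torch, tf
# or flax) is importable, so importing the package stays cheap and backend-free.
# The same guards are mirrored under TYPE_CHECKING below for static analyzers,
# while at runtime _LazyModule resolves the listed attributes on first access.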
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
lowerCAmelCase :int = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 1 |
'''simple docstring'''
import math
lowerCAmelCase :int = 1_0
lowerCAmelCase :str = 7
lowerCAmelCase :Union[str, Any] = BALLS_PER_COLOUR * NUM_COLOURS
def lowerCamelCase ( lowerCAmelCase : int = 20 ):
"""simple docstring"""
__magic_name__ : List[Any] = math.comb(lowerCAmelCase , lowerCAmelCase )
__magic_name__ : Any = math.comb(NUM_BALLS - BALLS_PER_COLOUR , lowerCAmelCase )
__magic_name__ : List[str] = NUM_COLOURS * (1 - missing_colour / total)
return f'{result:.9f}'
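# Illustrative cross-check (not part of the original file): by linearity of
# expectation, each of the 7 colours is present in a 20-ball draw with probability
# 1 - C(60, 20) / C(70, 20), so the expected number of distinct colours is 7 times
# that probability.
def _demo_expected_colours() -> float:
    survive = 1 - math.comb(60, 20) / math.comb(70, 20)
    return 7 * survive  # ≈ 6.818741802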
if __name__ == "__main__":
print(solution(2_0)) | 331 |
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
lowerCAmelCase :Any = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , **_A : Union[str, Any] ) -> Tuple:
super().__init__(**_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if self.framework == 'tf'
else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING )
def __call__( self : Optional[int] , images : Union[str, List[str], "Image", List["Image"]] , **kwargs : Dict ) -> Dict:
return super().__call__(images , **kwargs )
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> Optional[int]:
__magic_name__ : str = {}
if "candidate_labels" in kwargs:
__magic_name__ : str = kwargs['candidate_labels']
if "hypothesis_template" in kwargs:
__magic_name__ : Tuple = kwargs['hypothesis_template']
return preprocess_params, {}, {}
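# Usage shape (illustrative only; the checkpoint name is a typical example, not one
# mandated by this file):
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier(image, candidate_labels=["cat", "dog"], hypothesis_template="a photo of a {}.")
# Both keyword arguments are routed to preprocess() through the dict returned above.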
def __lowerCAmelCase ( self : str , image : Dict , candidate_labels : Optional[Any]=None , hypothesis_template : int="This is a photo of {}." ) -> int:
__magic_name__ : Dict = load_image(image )
__magic_name__ : List[str] = self.image_processor(images=[image] , return_tensors=self.framework )
__magic_name__ : Optional[Any] = candidate_labels
__magic_name__ : List[Any] = [hypothesis_template.format(_A ) for x in candidate_labels]
__magic_name__ : str = self.tokenizer(_A , return_tensors=self.framework , padding=_A )
__magic_name__ : Optional[Any] = [text_inputs]
return inputs
def __lowerCAmelCase ( self : Union[str, Any] , _A : Tuple ) -> str:
__magic_name__ : str = model_inputs.pop('candidate_labels' )
__magic_name__ : str = model_inputs.pop('text_inputs' )
if isinstance(text_inputs[0] , _A ):
__magic_name__ : Dict = text_inputs[0]
else:
# Batching case.
__magic_name__ : Optional[Any] = text_inputs[0][0]
__magic_name__ : List[Any] = self.model(**_A , **_A )
__magic_name__ : str = {
'candidate_labels': candidate_labels,
'logits': outputs.logits_per_image,
}
return model_outputs
def __lowerCAmelCase ( self : Optional[int] , _A : Optional[Any] ) -> Optional[int]:
__magic_name__ : Tuple = model_outputs.pop('candidate_labels' )
__magic_name__ : Union[str, Any] = model_outputs['logits'][0]
if self.framework == "pt":
__magic_name__ : Tuple = logits.softmax(dim=-1 ).squeeze(-1 )
__magic_name__ : Tuple = probs.tolist()
if not isinstance(_A , _A ):
__magic_name__ : Any = [scores]
elif self.framework == "tf":
__magic_name__ : Any = stable_softmax(_A , axis=-1 )
__magic_name__ : Dict = probs.numpy().tolist()
else:
raise ValueError(F'Unsupported framework: {self.framework}' )
__magic_name__ : Union[str, Any] = [
{'score': score, 'label': candidate_label}
for score, candidate_label in sorted(zip(_A , _A ) , key=lambda _A : -_A[0] )
]
return result | 331 | 1 |
'''simple docstring'''
import argparse
import os
import re
import packaging.version
lowerCAmelCase :Any = '''examples/'''
lowerCAmelCase :List[str] = {
'''examples''': (re.compile(r'''^check_min_version\("[^"]+"\)\s*$''', re.MULTILINE), '''check_min_version("VERSION")\n'''),
'''init''': (re.compile(r'''^__version__\s+=\s+"([^"]+)"\s*$''', re.MULTILINE), '''__version__ = "VERSION"\n'''),
'''setup''': (re.compile(r'''^(\s*)version\s*=\s*"[^"]+",''', re.MULTILINE), r'''\1version="VERSION",'''),
'''doc''': (re.compile(r'''^(\s*)release\s*=\s*"[^"]+"$''', re.MULTILINE), '''release = "VERSION"\n'''),
}
lowerCAmelCase :Tuple = {
'''init''': '''src/diffusers/__init__.py''',
'''setup''': '''setup.py''',
}
lowerCAmelCase :int = '''README.md'''
def lowerCamelCase ( fname : Tuple , version : int , pattern : str ):
"""simple docstring"""
with open(fname , 'r' , encoding='utf-8' , newline='\n' ) as f:
__magic_name__ : str = f.read()
__magic_name__ , __magic_name__ : int = REPLACE_PATTERNS[pattern]
__magic_name__ : Union[str, Any] = replace.replace('VERSION' , version )
__magic_name__ : str = re_pattern.sub(lowerCAmelCase , lowerCAmelCase )
with open(fname , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.write(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
for folder, directories, fnames in os.walk(lowerCAmelCase ):
# Removing some of the folders with non-actively maintained examples from the walk
if "research_projects" in directories:
directories.remove('research_projects' )
if "legacy" in directories:
directories.remove('legacy' )
for fname in fnames:
if fname.endswith('.py' ):
update_version_in_file(os.path.join(folder , fname ) , lowerCAmelCase , pattern='examples' )
def lowerCamelCase ( version : Optional[Any] , patch : Any=False ):
"""simple docstring"""
for pattern, fname in REPLACE_FILES.items():
update_version_in_file(fname , version , pattern )
if not patch:
update_version_in_examples(version )
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : Optional[int] = '🤗 Transformers currently provides the following architectures'
__magic_name__ : Any = '1. Want to contribute a new model?'
with open(lowerCAmelCase , 'r' , encoding='utf-8' , newline='\n' ) as f:
__magic_name__ : str = f.readlines()
# Find the start of the list.
__magic_name__ : Optional[int] = 0
while not lines[start_index].startswith(_start_prompt ):
start_index += 1
start_index += 1
__magic_name__ : Union[str, Any] = start_index
# Update the lines in the model list.
while not lines[index].startswith(_end_prompt ):
if lines[index].startswith('1.' ):
__magic_name__ : Any = lines[index].replace(
'https://huggingface.co/docs/diffusers/main/model_doc' , 'https://huggingface.co/docs/diffusers/model_doc' , )
index += 1
with open(lowerCAmelCase , 'w' , encoding='utf-8' , newline='\n' ) as f:
f.writelines(lowerCAmelCase )
def lowerCamelCase ( ):
"""simple docstring"""
with open(REPLACE_FILES['init'] , 'r' ) as f:
__magic_name__ : Any = f.read()
__magic_name__ : str = REPLACE_PATTERNS['init'][0].search(lowerCAmelCase ).groups()[0]
return packaging.version.parse(lowerCAmelCase )
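# Illustrative sketch (not part of the original script; the version string below is
# made up): how the release helpers here interpret a dev version via packaging.
def _demo_version_fields() -> tuple:
    parsed = packaging.version.parse('0.19.0.dev0')
    return parsed.is_devrelease, parsed.base_version  # (True, '0.19.0')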
def lowerCamelCase ( lowerCAmelCase : Tuple=False ):
"""simple docstring"""
__magic_name__ : Dict = get_version()
if patch and default_version.is_devrelease:
raise ValueError('Can\'t create a patch version from the dev branch, checkout a released version!' )
if default_version.is_devrelease:
__magic_name__ : List[str] = default_version.base_version
elif patch:
__magic_name__ : Optional[Any] = f'{default_version.major}.{default_version.minor}.{default_version.micro + 1}'
else:
__magic_name__ : Union[str, Any] = f'{default_version.major}.{default_version.minor + 1}.0'
# Now let's ask nicely if that's the right one.
__magic_name__ : Optional[int] = input(f'Which version are you releasing? [{default_version}]' )
if len(lowerCAmelCase ) == 0:
__magic_name__ : Union[str, Any] = default_version
print(f'Updating version to {version}.' )
global_version_update(lowerCAmelCase , patch=lowerCAmelCase )
def lowerCamelCase ( ):
"""simple docstring"""
__magic_name__ : int = get_version()
__magic_name__ : Optional[Any] = f'{current_version.major}.{current_version.minor + 1}.0.dev0'
__magic_name__ : str = current_version.base_version
# Check with the user we got that right.
__magic_name__ : int = input(f'Which version are we developing now? [{dev_version}]' )
if len(lowerCAmelCase ) == 0:
__magic_name__ : Optional[Any] = dev_version
print(f'Updating version to {version}.' )
global_version_update(lowerCAmelCase )
# print("Cleaning main README, don't forget to run `make fix-copies`.")
# clean_main_ref_in_model_list()
if __name__ == "__main__":
lowerCAmelCase :Tuple = argparse.ArgumentParser()
parser.add_argument('''--post_release''', action='''store_true''', help='''Whether this is pre or post release.''')
parser.add_argument('''--patch''', action='''store_true''', help='''Whether or not this is a patch release.''')
lowerCAmelCase :Optional[int] = parser.parse_args()
if not args.post_release:
pre_release_work(patch=args.patch)
elif args.patch:
print('''Nothing to do after a patch :-)''')
else:
post_release_work() | 331 |
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
lowerCAmelCase :int = '''pt'''
elif is_tf_available():
lowerCAmelCase :Optional[Any] = '''tf'''
else:
lowerCAmelCase :Optional[Any] = '''jax'''
class _lowerCamelCase ( lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Tuple = ByTaTokenizer
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
super().setUp()
__magic_name__ : Any = ByTaTokenizer()
tokenizer.save_pretrained(self.tmpdirname )
@cached_property
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[str]:
return ByTaTokenizer.from_pretrained('google/byt5-small' )
def __lowerCAmelCase ( self : Tuple , **_A : Optional[int] ) -> ByTaTokenizer:
return self.tokenizer_class.from_pretrained(self.tmpdirname , **_A )
def __lowerCAmelCase ( self : Optional[int] , tokenizer : Union[str, Any] , with_prefix_space : int=False , max_length : Union[str, Any]=20 , min_length : Optional[int]=5 ) -> Tuple[str, list]:
# XXX The default common tokenizer tests assume that every ID is decodable on its own.
# This assumption is invalid for ByT5 because single bytes might not be
# valid utf-8 (byte 128 for instance).
# Here we're overriding the smallest possible method to provide
# a clean sequence without making the same assumption.
__magic_name__ : Optional[Any] = []
for i in range(len(_A ) ):
try:
__magic_name__ : Optional[Any] = tokenizer.decode([i] , clean_up_tokenization_spaces=_A )
except UnicodeDecodeError:
pass
toks.append((i, tok) )
__magic_name__ : Any = list(filter(lambda t : re.match(R'^[ a-zA-Z]+$' , t[1] ) , _A ) )
__magic_name__ : List[str] = list(filter(lambda t : [t[0]] == tokenizer.encode(t[1] , add_special_tokens=_A ) , _A ) )
if max_length is not None and len(_A ) > max_length:
__magic_name__ : Optional[int] = toks[:max_length]
if min_length is not None and len(_A ) < min_length and len(_A ) > 0:
while len(_A ) < min_length:
__magic_name__ : Optional[int] = toks + toks
# toks_str = [t[1] for t in toks]
__magic_name__ : List[str] = [t[0] for t in toks]
# Ensure consistency
__magic_name__ : Optional[int] = tokenizer.decode(_A , clean_up_tokenization_spaces=_A )
if " " not in output_txt and len(_A ) > 1:
__magic_name__ : int = (
tokenizer.decode([toks_ids[0]] , clean_up_tokenization_spaces=_A )
+ ' '
+ tokenizer.decode(toks_ids[1:] , clean_up_tokenization_spaces=_A )
)
if with_prefix_space:
__magic_name__ : Union[str, Any] = ' ' + output_txt
__magic_name__ : Dict = tokenizer.encode(_A , add_special_tokens=_A )
return output_txt, output_ids
def __lowerCAmelCase ( self : int ) -> str:
__magic_name__ : Any = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'] )
__magic_name__ : List[str] = tokenizer(['hi', 'I went to the gym', ''] )
self.assertListEqual(batch_with_eos_added['input_ids'] , batch_without_eos_added['input_ids'] )
def __lowerCAmelCase ( self : int ) -> Tuple:
__magic_name__ : Optional[int] = self.ta_base_tokenizer
__magic_name__ : Optional[int] = 'Unicode €.'
__magic_name__ : Optional[Any] = tokenizer(_A )
__magic_name__ : Optional[Any] = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : Any = tokenizer.decode(_A )
self.assertEqual(_A , 'Unicode €.</s>' )
__magic_name__ : Any = tokenizer('e è é ê ë' )
__magic_name__ : str = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
self.assertEqual(encoded['input_ids'] , _A )
# decoding
__magic_name__ : List[str] = tokenizer.decode(_A )
self.assertEqual(_A , 'e è é ê ë</s>' )
# encode/decode, but with `encode` instead of `__call__`
self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë' ) ) , 'e è é ê ë</s>' )
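# Note added for clarity (not part of the original tests): ByT5 ids are raw UTF-8
# bytes shifted by 3 to leave room for the special tokens pad=0, eos=1 and unk=2,
# so [b + 3 for b in 'Unicode €.'.encode('utf-8')] reproduces the expected ids
# above; the trailing 1 is the appended </s> token.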
def __lowerCAmelCase ( self : Any ) -> int:
__magic_name__ : List[Any] = self.ta_base_tokenizer
__magic_name__ : Optional[Any] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
# fmt: off
__magic_name__ : List[Any] = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
# fmt: on
__magic_name__ : Any = tokenizer(_A , padding=_A , return_tensors=_A )
self.assertIsInstance(_A , _A )
if FRAMEWORK != "jax":
__magic_name__ : str = list(batch.input_ids.numpy()[0] )
else:
__magic_name__ : Optional[Any] = list(batch.input_ids.tolist()[0] )
self.assertListEqual(_A , _A )
self.assertEqual((2, 37) , batch.input_ids.shape )
self.assertEqual((2, 37) , batch.attention_mask.shape )
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : List[str] = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
__magic_name__ : Optional[int] = tokenizer(_A , padding=_A , return_tensors=_A )
# check if input_ids are returned and no decoder_input_ids
self.assertIn('input_ids' , _A )
self.assertIn('attention_mask' , _A )
self.assertNotIn('decoder_input_ids' , _A )
self.assertNotIn('decoder_attention_mask' , _A )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
__magic_name__ : Union[str, Any] = self.ta_base_tokenizer
__magic_name__ : Tuple = [
'Summary of the text.',
'Another summary.',
]
__magic_name__ : Dict = tokenizer(
text_target=_A , max_length=32 , padding='max_length' , truncation=_A , return_tensors=_A )
self.assertEqual(32 , targets['input_ids'].shape[1] )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[Any]:
__magic_name__ : str = self.ta_base_tokenizer
__magic_name__ : Any = ['A long paragraph for summarization. </s>']
__magic_name__ : List[str] = ['Summary of the text. </s>']
# fmt: off
__magic_name__ : Tuple = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
__magic_name__ : List[Any] = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
# fmt: on
__magic_name__ : str = tokenizer(_A , text_target=_A )
self.assertEqual(_A , batch['input_ids'][0] )
self.assertEqual(_A , batch['labels'][0] )
def __lowerCAmelCase ( self : Any ) -> str:
# safety check on max_len default value so we are sure the test works
__magic_name__ : Optional[int] = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
self.assertNotEqual(tokenizer.model_max_length , 42 )
# Now let's start the test
__magic_name__ : str = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : str = tempfile.mkdtemp()
__magic_name__ : Tuple = ' He is very happy, UNwant\u00E9d,running'
__magic_name__ : Union[str, Any] = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : List[str] = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Optional[Any] = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
shutil.rmtree(_A )
__magic_name__ : Union[str, Any] = self.get_tokenizers(model_max_length=42 )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
# Isolate this from the other tests because we save additional tokens/etc
__magic_name__ : Optional[Any] = tempfile.mkdtemp()
__magic_name__ : Union[str, Any] = ' He is very happy, UNwant\u00E9d,running'
tokenizer.add_tokens(['bim', 'bambam'] )
__magic_name__ : Union[str, Any] = tokenizer.additional_special_tokens
additional_special_tokens.append('new_additional_special_token' )
tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens} )
__magic_name__ : int = tokenizer.encode(_A , add_special_tokens=_A )
tokenizer.save_pretrained(_A )
__magic_name__ : Any = tokenizer.__class__.from_pretrained(_A )
__magic_name__ : Dict = after_tokenizer.encode(_A , add_special_tokens=_A )
self.assertListEqual(_A , _A )
self.assertIn('new_additional_special_token' , after_tokenizer.additional_special_tokens )
self.assertEqual(after_tokenizer.model_max_length , 42 )
__magic_name__ : int = tokenizer.__class__.from_pretrained(_A , model_max_length=43 )
self.assertEqual(tokenizer.model_max_length , 43 )
shutil.rmtree(_A )
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
__magic_name__ : Tuple = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
with open(os.path.join(_A , 'special_tokens_map.json' ) , encoding='utf-8' ) as json_file:
__magic_name__ : Union[str, Any] = json.load(_A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , encoding='utf-8' ) as json_file:
__magic_name__ : Optional[Any] = json.load(_A )
__magic_name__ : List[str] = [F'<extra_id_{i}>' for i in range(125 )]
__magic_name__ : Any = added_tokens_extra_ids + [
'an_additional_special_token'
]
__magic_name__ : Tuple = added_tokens_extra_ids + [
'an_additional_special_token'
]
with open(os.path.join(_A , 'special_tokens_map.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
with open(os.path.join(_A , 'tokenizer_config.json' ) , 'w' , encoding='utf-8' ) as outfile:
json.dump(_A , _A )
# the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
# into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
# "special_tokens_map.json" files
__magic_name__ : str = tokenizer_class.from_pretrained(
_A , )
self.assertIn(
'an_additional_special_token' , tokenizer_without_change_in_init.additional_special_tokens )
# self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
self.assertEqual(
['an_additional_special_token'] , tokenizer_without_change_in_init.convert_ids_to_tokens(
tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'] ) ) , )
# Now we test that we can change the value of additional_special_tokens in the from_pretrained
__magic_name__ : Tuple = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token' , lstrip=_A )]
__magic_name__ : Optional[Any] = tokenizer_class.from_pretrained(
_A , additional_special_tokens=_A , )
self.assertIn('a_new_additional_special_token' , tokenizer.additional_special_tokens )
self.assertEqual(
['a_new_additional_special_token'] , tokenizer.convert_ids_to_tokens(
tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'] ) ) , )
def __lowerCAmelCase ( self : Any ) -> Optional[int]:
__magic_name__ : int = []
if self.test_slow_tokenizer:
tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()) )
if self.test_rust_tokenizer:
tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()) )
for tokenizer_class, tokenizer_utils in tokenizer_list:
with tempfile.TemporaryDirectory() as tmp_dir:
tokenizer_utils.save_pretrained(_A )
__magic_name__ : List[Any] = tokenizer_class.from_pretrained(_A )
self.assertTrue(tokenizer.decode([255] ) == '' )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
pass
def __lowerCAmelCase ( self : List[str] ) -> int:
pass
def __lowerCAmelCase ( self : Optional[int] ) -> Optional[int]:
pass
def __lowerCAmelCase ( self : List[Any] ) -> int:
pass
def __lowerCAmelCase ( self : str ) -> Tuple:
# The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
# and special added tokens as tokens
__magic_name__ : List[str] = self.get_tokenizers(fast=_A , do_lower_case=_A )
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : Any = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
__magic_name__ : int = tokenizer.convert_tokens_to_string(_A )
self.assertIsInstance(_A , _A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Any = self.get_tokenizers()
for tokenizer in tokenizers:
with self.subTest(F'{tokenizer.__class__.__name__}' ):
__magic_name__ : List[str] = [
'bos_token',
'eos_token',
'unk_token',
'sep_token',
'pad_token',
'cls_token',
'mask_token',
]
__magic_name__ : List[str] = 0
__magic_name__ : str = tokenizer.convert_ids_to_tokens(
_A , skip_special_tokens=_A )
for attr in attributes_list:
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , attr + '_id' , _A )
self.assertEqual(getattr(_A , _A ) , _A )
self.assertEqual(getattr(_A , attr + '_id' ) , _A )
setattr(_A , 'additional_special_tokens_ids' , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [] )
setattr(_A , 'additional_special_tokens_ids' , [token_id_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens' ) , [token_to_test_setters] )
self.assertListEqual(getattr(_A , 'additional_special_tokens_ids' ) , [token_id_to_test_setters] ) | 331 | 1 |
'''simple docstring'''
import datasets
from .nmt_bleu import compute_bleu # From: https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py
lowerCAmelCase :Tuple = '''\
@INPROCEEDINGS{Papineni02bleu:a,
author = {Kishore Papineni and Salim Roukos and Todd Ward and Wei-jing Zhu},
title = {BLEU: a Method for Automatic Evaluation of Machine Translation},
booktitle = {},
year = {2002},
pages = {311--318}
}
@inproceedings{lin-och-2004-orange,
title = "{ORANGE}: a Method for Evaluating Automatic Evaluation Metrics for Machine Translation",
author = "Lin, Chin-Yew and
Och, Franz Josef",
booktitle = "{COLING} 2004: Proceedings of the 20th International Conference on Computational Linguistics",
month = "aug 23{--}aug 27",
year = "2004",
address = "Geneva, Switzerland",
publisher = "COLING",
url = "https://www.aclweb.org/anthology/C04-1072",
pages = "501--507",
}
'''
lowerCAmelCase :List[Any] = '''\
BLEU (bilingual evaluation understudy) is an algorithm for evaluating the quality of text which has been machine-translated from one natural language to another.
Quality is considered to be the correspondence between a machine\'s output and that of a human: "the closer a machine translation is to a professional human translation,
the better it is" – this is the central idea behind BLEU. BLEU was one of the first metrics to claim a high correlation with human judgements of quality, and
remains one of the most popular automated and inexpensive metrics.
Scores are calculated for individual translated segments—generally sentences—by comparing them with a set of good quality reference translations.
Those scores are then averaged over the whole corpus to reach an estimate of the translation\'s overall quality. Intelligibility or grammatical correctness
are not taken into account.
BLEU\'s output is always a number between 0 and 1. This value indicates how similar the candidate text is to the reference texts, with values closer to 1
representing more similar texts. Few human translations will attain a score of 1, since this would indicate that the candidate is identical to one of the
reference translations. For this reason, it is not necessary to attain a score of 1. Because there are more opportunities to match, adding additional
reference translations will increase the BLEU score.
'''
lowerCAmelCase :Tuple = '''
Computes BLEU score of translated segments against one or more references.
Args:
predictions: list of translations to score.
Each translation should be tokenized into a list of tokens.
references: list of lists of references for each translation.
Each reference should be tokenized into a list of tokens.
max_order: Maximum n-gram order to use when computing BLEU score.
smooth: Whether or not to apply Lin et al. 2004 smoothing.
Returns:
\'bleu\': bleu score,
\'precisions\': geometric mean of n-gram precisions,
\'brevity_penalty\': brevity penalty,
\'length_ratio\': ratio of lengths,
\'translation_length\': translation_length,
\'reference_length\': reference_length
Examples:
>>> predictions = [
... ["hello", "there", "general", "kenobi"], # tokenized prediction of the first sample
... ["foo", "bar", "foobar"] # tokenized prediction of the second sample
... ]
>>> references = [
... [["hello", "there", "general", "kenobi"], ["hello", "there", "!"]], # tokenized references for the first sample (2 references)
... [["foo", "bar", "foobar"]] # tokenized references for the second sample (1 reference)
... ]
>>> bleu = datasets.load_metric("bleu")
>>> results = bleu.compute(predictions=predictions, references=references)
>>> print(results["bleu"])
1.0
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'predictions': datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ),
'references': datasets.Sequence(
datasets.Sequence(datasets.Value('string' , id='token' ) , id='sequence' ) , id='references' ),
} ) , codebase_urls=['https://github.com/tensorflow/nmt/blob/master/nmt/scripts/bleu.py'] , reference_urls=[
'https://en.wikipedia.org/wiki/BLEU',
'https://towardsdatascience.com/evaluating-text-output-in-nlp-bleu-at-your-own-risk-e8609665a213',
] , )
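# Note added for clarity (not part of the original file): compute_bleu combines the
# geometric mean of the modified n-gram precisions (n = 1..max_order) with the
# brevity penalty, BP = exp(1 - reference_length / translation_length) when the
# candidate is shorter than the reference and BP = 1 otherwise.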
def __lowerCAmelCase ( self : List[str] , predictions : Optional[Any] , references : Any , max_order : Optional[Any]=4 , smooth : str=False ) -> Union[str, Any]:
score = compute_bleu(
reference_corpus=references , translation_corpus=predictions , max_order=max_order , smooth=smooth )
(bleu, precisions, bp, ratio, translation_length, reference_length) = score
return {
"bleu": bleu,
"precisions": precisions,
"brevity_penalty": bp,
"length_ratio": ratio,
"translation_length": translation_length,
"reference_length": reference_length,
} | 331 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
# For consistency across different places the DisjunctiveConstraint is called,
# dc.token_ids is a list of integers. It is also initialized only by integers.
__magic_name__ : Any = [[1, 2, 4], [1, 2, 3, 4]]
__magic_name__ : Dict = DisjunctiveConstraint(_A )
self.assertTrue(isinstance(dc.token_ids , _A ) )
with self.assertRaises(_A ):
DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]] ) )
with self.assertRaises(_A ):
DisjunctiveConstraint([torch.LongTensor([1, 2, 4] ), torch.LongTensor([1, 2, 3, 4, 5] )] )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
# We can't have constraints that are complete subsets of another. This leads to a perverse
# interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
# It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
# fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
# will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
__magic_name__ : Optional[int] = [[1, 2], [1, 2, 3, 4]]
with self.assertRaises(_A ):
DisjunctiveConstraint(_A ) # fails here
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
__magic_name__ : Dict = [[1, 2, 3], [1, 2, 4]]
__magic_name__ : List[Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : Tuple = dc.update(1 )
__magic_name__ : Optional[int] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(2 )
__magic_name__ : List[Any] = stepped is True and completed is False and reset is False
self.assertTrue(_A )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(3 )
__magic_name__ : Any = stepped is True and completed is True and reset is False
self.assertTrue(_A )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 3] )
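# Note added for clarity (not part of the original tests): update(token_id) returns
# the triple (stepped, completed, reset) -- stepped is True when the token extends
# one of the allowed branches, completed flips to True once any full branch has
# been generated, and reset signals that the token broke the partial match.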
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : Union[str, Any] = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
__magic_name__ : Union[str, Any] = DisjunctiveConstraint(_A )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : Optional[int] = dc.update(4 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.current_seq == [1, 2, 4] )
__magic_name__ , __magic_name__ , __magic_name__ : Any = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.current_seq == [1, 2, 4, 5] )
dc.reset()
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(1 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 3 )
self.assertTrue(dc.current_seq == [1] )
__magic_name__ , __magic_name__ , __magic_name__ : List[Any] = dc.update(2 )
self.assertTrue(not dc.completed )
self.assertTrue(dc.remaining() == 2 )
self.assertTrue(dc.current_seq == [1, 2] )
__magic_name__ , __magic_name__ , __magic_name__ : int = dc.update(5 )
self.assertTrue(dc.completed ) # Completed!
self.assertTrue(dc.remaining() == 0 )
self.assertTrue(dc.current_seq == [1, 2, 5] ) | 331 | 1 |
'''simple docstring'''
import argparse
import os
import gluonnlp as nlp
import mxnet as mx
import numpy as np
import torch
from gluonnlp.base import get_home_dir
from gluonnlp.model.bert import BERTEncoder
from gluonnlp.model.utils import _load_vocab
from gluonnlp.vocab import Vocab
from packaging import version
from torch import nn
from transformers import BertConfig, BertForMaskedLM, BertModel, RobertaTokenizer
from transformers.models.bert.modeling_bert import (
BertIntermediate,
BertLayer,
BertOutput,
BertSelfAttention,
BertSelfOutput,
)
from transformers.utils import logging
if version.parse(nlp.__version__) != version.parse('''0.8.3'''):
raise Exception('''requires gluonnlp == 0.8.3''')
if version.parse(mx.__version__) != version.parse('''1.5.0'''):
raise Exception('''requires mxnet == 1.5.0''')
logging.set_verbosity_info()
lowerCAmelCase :int = logging.get_logger(__name__)
lowerCAmelCase :int = '''The Nymphenburg Palace is a beautiful palace in Munich!'''
def lowerCamelCase ( bort_checkpoint_path : str , pytorch_dump_folder_path : str ):
"""simple docstring"""
__magic_name__ : str = {
'attention_cell': 'multi_head',
'num_layers': 4,
'units': 1024,
'hidden_size': 768,
'max_length': 512,
'num_heads': 8,
'scaled': True,
'dropout': 0.1,
'use_residual': True,
'embed_size': 1024,
'embed_dropout': 0.1,
'word_embed': None,
'layer_norm_eps': 1e-5,
'token_type_vocab_size': 2,
}
__magic_name__ : Tuple = bort_4_8_768_1024_hparams
# Let's construct the original Bort model here
# Taken from official BERT implementation, see:
# https://github.com/alexa/bort/blob/master/bort/bort.py
__magic_name__ : Any = BERTEncoder(
attention_cell=predefined_args['attention_cell'] , num_layers=predefined_args['num_layers'] , units=predefined_args['units'] , hidden_size=predefined_args['hidden_size'] , max_length=predefined_args['max_length'] , num_heads=predefined_args['num_heads'] , scaled=predefined_args['scaled'] , dropout=predefined_args['dropout'] , output_attention=lowerCAmelCase , output_all_encodings=lowerCAmelCase , use_residual=predefined_args['use_residual'] , activation=predefined_args.get('activation' , 'gelu' ) , layer_norm_eps=predefined_args.get('layer_norm_eps' , lowerCAmelCase ) , )
# Vocab information needs to be fetched first
# It's the same as RoBERTa, so RobertaTokenizer can be used later
__magic_name__ : int = 'openwebtext_ccnews_stories_books_cased'
# Specify download folder to Gluonnlp's vocab
__magic_name__ : str = os.path.join(get_home_dir() , 'models' )
__magic_name__ : int = _load_vocab(lowerCAmelCase , lowerCAmelCase , lowerCAmelCase , cls=lowerCAmelCase )
__magic_name__ : Dict = nlp.model.BERTModel(
lowerCAmelCase , len(lowerCAmelCase ) , units=predefined_args['units'] , embed_size=predefined_args['embed_size'] , embed_dropout=predefined_args['embed_dropout'] , word_embed=predefined_args['word_embed'] , use_pooler=lowerCAmelCase , use_token_type_embed=lowerCAmelCase , token_type_vocab_size=predefined_args['token_type_vocab_size'] , use_classifier=lowerCAmelCase , use_decoder=lowerCAmelCase , )
original_bort.load_parameters(bort_checkpoint_path , cast_dtype=lowerCAmelCase , ignore_extra=lowerCAmelCase )
params = original_bort._collect_params_with_prefix()  # keyed by Gluon parameter name; consumed by check_and_map_params below
# Build our config 🤗
__magic_name__ : List[Any] = {
'architectures': ['BertForMaskedLM'],
'attention_probs_dropout_prob': predefined_args['dropout'],
'hidden_act': 'gelu',
'hidden_dropout_prob': predefined_args['dropout'],
'hidden_size': predefined_args['embed_size'],
'initializer_range': 0.02,
'intermediate_size': predefined_args['hidden_size'],
'layer_norm_eps': predefined_args['layer_norm_eps'],
'max_position_embeddings': predefined_args['max_length'],
'model_type': 'bort',
'num_attention_heads': predefined_args['num_heads'],
'num_hidden_layers': predefined_args['num_layers'],
'pad_token_id': 1, # 2 = BERT, 1 = RoBERTa
'type_vocab_size': 1, # 2 = BERT, 1 = RoBERTa
'vocab_size': len(lowerCAmelCase ),
}
__magic_name__ : List[str] = BertConfig.from_dict(lowerCAmelCase )
__magic_name__ : Optional[int] = BertForMaskedLM(lowerCAmelCase )
hf_bort_model.eval()
# Parameter mapping table (Gluonnlp to Transformers)
# * denotes layer index
#
# | Gluon Parameter | Transformers Parameter
# | -------------------------------------------------------------- | ----------------------
# | `encoder.layer_norm.beta` | `bert.embeddings.LayerNorm.bias`
# | `encoder.layer_norm.gamma` | `bert.embeddings.LayerNorm.weight`
# | `encoder.position_weight` | `bert.embeddings.position_embeddings.weight`
# | `word_embed.0.weight` | `bert.embeddings.word_embeddings.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_key.bias` | `bert.encoder.layer.*.attention.self.key.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_key.weight` | `bert.encoder.layer.*.attention.self.key.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_query.bias` | `bert.encoder.layer.*.attention.self.query.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_query.weight` | `bert.encoder.layer.*.attention.self.query.weight`
# | `encoder.transformer_cells.*.attention_cell.proj_value.bias` | `bert.encoder.layer.*.attention.self.value.bias`
# | `encoder.transformer_cells.*.attention_cell.proj_value.weight` | `bert.encoder.layer.*.attention.self.value.weight`
# | `encoder.transformer_cells.*.ffn.ffn_2.bias` | `bert.encoder.layer.*.attention.output.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_2.weight` | `bert.encoder.layer.*.attention.output.dense.weight`
# | `encoder.transformer_cells.*.layer_norm.beta` | `bert.encoder.layer.*.attention.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.layer_norm.gamma` | `bert.encoder.layer.*.attention.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.ffn.ffn_1.bias` | `bert.encoder.layer.*.intermediate.dense.bias`
# | `encoder.transformer_cells.*.ffn.ffn_1.weight` | `bert.encoder.layer.*.intermediate.dense.weight`
# | `encoder.transformer_cells.*.ffn.layer_norm.beta` | `bert.encoder.layer.*.output.LayerNorm.bias`
# | `encoder.transformer_cells.*.ffn.layer_norm.gamma` | `bert.encoder.layer.*.output.LayerNorm.weight`
# | `encoder.transformer_cells.*.proj.bias` | `bert.encoder.layer.*.output.dense.bias`
# | `encoder.transformer_cells.*.proj.weight` | `bert.encoder.layer.*.output.dense.weight`
# Helper function to convert MXNET Arrays to PyTorch
def to_torch(mx_array : Optional[int] ) -> nn.Parameter:
return nn.Parameter(torch.FloatTensor(mx_array.data().asnumpy() ) )
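# Note added for clarity (not part of the original script): .data() reads the
# parameter's NDArray from MXNet, .asnumpy() copies it into host NumPy memory,
# and torch.FloatTensor / nn.Parameter wrap that copy for the PyTorch model.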
# Check param shapes and map new HF param back
def check_and_map_params(hf_param : Optional[int] , gluon_param : Optional[int] ):
shape_hf = hf_param.shape
gluon_param = to_torch(params[gluon_param] )  # replace the key string with the actual tensor
shape_gluon = gluon_param.shape
assert (
shape_hf == shape_gluon
), f'The gluon parameter {gluon_param} has shape {shape_gluon}, but expects shape {shape_hf} for Transformers'
return gluon_param
__magic_name__ : Union[str, Any] = check_and_map_params(
hf_bort_model.bert.embeddings.word_embeddings.weight , 'word_embed.0.weight' )
__magic_name__ : Any = check_and_map_params(
hf_bort_model.bert.embeddings.position_embeddings.weight , 'encoder.position_weight' )
__magic_name__ : List[Any] = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.bias , 'encoder.layer_norm.beta' )
__magic_name__ : int = check_and_map_params(
hf_bort_model.bert.embeddings.LayerNorm.weight , 'encoder.layer_norm.gamma' )
# Inspired by RoBERTa conversion script, we just zero them out (Bort does not use them)
__magic_name__ : List[str] = torch.zeros_like(
hf_bort_model.bert.embeddings.token_type_embeddings.weight.data )
for i in range(hf_bort_config.num_hidden_layers ):
__magic_name__ : BertLayer = hf_bort_model.bert.encoder.layer[i]
# self attention
__magic_name__ : BertSelfAttention = layer.attention.self
__magic_name__ : Union[str, Any] = check_and_map_params(
self_attn.key.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.bias' )
__magic_name__ : Dict = check_and_map_params(
self_attn.key.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_key.weight' )
__magic_name__ : int = check_and_map_params(
self_attn.query.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.bias' )
__magic_name__ : Dict = check_and_map_params(
self_attn.query.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_query.weight' )
__magic_name__ : Optional[Any] = check_and_map_params(
self_attn.value.bias.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.bias' )
__magic_name__ : List[str] = check_and_map_params(
self_attn.value.weight.data , f'encoder.transformer_cells.{i}.attention_cell.proj_value.weight' )
# self attention output
__magic_name__ : BertSelfOutput = layer.attention.output
__magic_name__ : str = check_and_map_params(
self_output.dense.bias , f'encoder.transformer_cells.{i}.proj.bias' )
__magic_name__ : List[Any] = check_and_map_params(
self_output.dense.weight , f'encoder.transformer_cells.{i}.proj.weight' )
__magic_name__ : str = check_and_map_params(
self_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.layer_norm.beta' )
__magic_name__ : Dict = check_and_map_params(
self_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.layer_norm.gamma' )
# intermediate
__magic_name__ : BertIntermediate = layer.intermediate
__magic_name__ : Any = check_and_map_params(
intermediate.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_1.bias' )
__magic_name__ : List[str] = check_and_map_params(
intermediate.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_1.weight' )
# output
__magic_name__ : BertOutput = layer.output
__magic_name__ : Optional[Any] = check_and_map_params(
bert_output.dense.bias , f'encoder.transformer_cells.{i}.ffn.ffn_2.bias' )
__magic_name__ : Tuple = check_and_map_params(
bert_output.dense.weight , f'encoder.transformer_cells.{i}.ffn.ffn_2.weight' )
__magic_name__ : Optional[int] = check_and_map_params(
bert_output.LayerNorm.bias , f'encoder.transformer_cells.{i}.ffn.layer_norm.beta' )
__magic_name__ : str = check_and_map_params(
bert_output.LayerNorm.weight , f'encoder.transformer_cells.{i}.ffn.layer_norm.gamma' )
# Save space and energy 🎄
hf_bort_model.half()
# Compare output of both models
__magic_name__ : Tuple = RobertaTokenizer.from_pretrained('roberta-base' )
__magic_name__ : Optional[int] = tokenizer.encode_plus(lowerCAmelCase )['input_ids']
# Get gluon output
__magic_name__ : Optional[Any] = mx.nd.array([input_ids] )
__magic_name__ : List[Any] = original_bort(inputs=lowerCAmelCase , token_types=[] )
# Get Transformer output (save and reload model again)
hf_bort_model.save_pretrained(pytorch_dump_folder_path )
hf_bort_model = BertModel.from_pretrained(pytorch_dump_folder_path )
hf_bort_model.eval()
__magic_name__ : List[str] = tokenizer.encode_plus(lowerCAmelCase , return_tensors='pt' )
__magic_name__ : Optional[int] = hf_bort_model(**lowerCAmelCase )[0]
__magic_name__ : int = output_gluon[0].asnumpy()
__magic_name__ : Any = output_hf[0].detach().numpy()
__magic_name__ : int = np.max(np.abs(hf_layer - gluon_layer ) ).item()
__magic_name__ : Union[str, Any] = np.allclose(lowerCAmelCase , lowerCAmelCase , atol=1e-3 )
if success:
print('✔️ Both model do output the same tensors' )
else:
print('❌ Both model do **NOT** output the same tensors' )
print('Absolute difference is:' , lowerCAmelCase )
if __name__ == "__main__":
lowerCAmelCase :int = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--bort_checkpoint_path''', default=None, type=str, required=True, help='''Path the official Bort params file.'''
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
lowerCAmelCase :Optional[Any] = parser.parse_args()
convert_bort_checkpoint_to_pytorch(args.bort_checkpoint_path, args.pytorch_dump_folder_path) | 331 |
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( readme_md : str , expected_dict : Optional[int] ):
    """simple docstring"""
    assert ReadMe.from_string(readme_md , example_yaml_structure ).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( readme_md : Optional[int] , expected_error : Dict ):
    """simple docstring"""
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
        readme = ReadMe.from_string(readme_md , example_yaml_structure )
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( readme_md : List[str] , expected_error : Optional[int] ):
    """simple docstring"""
    with pytest.raises(ValueError , match=re.escape(expected_error.format(path='root' ) ) ):
        ReadMe.from_string(readme_md , example_yaml_structure )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( readme_md : Tuple ):
    """simple docstring"""
    ReadMe.from_string(readme_md , example_yaml_structure , suppress_parsing_errors=True )
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def lowerCamelCase ( readme_md : Optional[Any] , expected_dict : List[Any] ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        out = ReadMe.from_readme(path , example_yaml_structure ).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def lowerCamelCase ( readme_md : Tuple , expected_error : List[Any] ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            readme = ReadMe.from_readme(path , example_yaml_structure )
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( readme_md : int , expected_error : str ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        expected_error = expected_error.format(path=path )
        with pytest.raises(ValueError , match=re.escape(expected_error ) ):
            ReadMe.from_readme(path , example_yaml_structure )
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def lowerCamelCase ( readme_md : Union[str, Any] ):
    """simple docstring"""
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir ) / 'README.md'
        with open(path , 'w+' ) as readme_file:
            readme_file.write(readme_md )
        ReadMe.from_readme(path , example_yaml_structure , suppress_parsing_errors=True ) | 331 | 1 |
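# ---------------------------------------------------------------------------
# A minimal, self-contained sketch of the idea the tests above exercise: parse
# a dataset card's markdown headings into a tree and validate it against a
# nested spec shaped like the YAML structure at the top of the file. This is
# an illustration only, not the `datasets.utils.readme.ReadMe` implementation;
# every name below is made up for the sketch.
# ---------------------------------------------------------------------------
import re

def section_text(body: str, sublevel: int) -> str:
    """Text owned by a section: everything before its first subsection heading."""
    m = re.search(rf"^{'#' * sublevel} ", body, re.MULTILINE)
    return body[: m.start()].strip() if m else body.strip()

def parse_sections(md: str, level: int = 1) -> list:
    """Split markdown into {name, text, subsections} dicts at one heading level."""
    heading = re.compile(rf"^{'#' * level} (?!#)(.+)$", re.MULTILINE)
    matches = list(heading.finditer(md))
    sections = []
    for i, m in enumerate(matches):
        end = matches[i + 1].start() if i + 1 < len(matches) else len(md)
        body = md[m.end():end]
        sections.append({
            "name": m.group(1).strip(),
            "text": section_text(body, level + 1),
            "subsections": parse_sections(body, level + 1),
        })
    return sections

def validate_section(section: dict, spec: dict, errors: list) -> list:
    if not spec.get("allow_empty_text", True) and not section["text"]:
        errors.append(f"Expected some text in section `{section['name']}` but it is empty.")
    known = {s["name"]: s for s in spec.get("subsections") or []}
    for sub in section["subsections"]:
        if sub["name"] in known:
            validate_section(sub, known[sub["name"]], errors)
    return errors

demo_card = "# Dataset Card for X\n## Table of Contents\nSome text here.\n## Dataset Description\n"
demo_spec = {
    "allow_empty_text": True,
    "subsections": [
        {"name": "Table of Contents", "allow_empty_text": False, "subsections": None},
        {"name": "Dataset Description", "allow_empty_text": False, "subsections": None},
    ],
}
print(validate_section(parse_sections(demo_card)[0], demo_spec, []))
# -> ['Expected some text in section `Dataset Description` but it is empty.']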
'''simple docstring'''
from argparse import ArgumentParser
from datasets.commands.convert import ConvertCommand
from datasets.commands.dummy_data import DummyDataCommand
from datasets.commands.env import EnvironmentCommand
from datasets.commands.run_beam import RunBeamCommand
from datasets.commands.test import TestCommand
from datasets.utils.logging import set_verbosity_info
def parse_unknown_args ( unknown_args : list ):
    """simple docstring"""
    return {key.lstrip('-' ): value for key, value in zip(unknown_args[::2] , unknown_args[1::2] )}
def main ( ):
    """simple docstring"""
    parser = ArgumentParser(
        'HuggingFace Datasets CLI tool' , usage='datasets-cli <command> [<args>]' , allow_abbrev=False )
    commands_parser = parser.add_subparsers(help='datasets-cli command helpers' )
    set_verbosity_info()
    # Register commands
    ConvertCommand.register_subcommand(commands_parser )
    EnvironmentCommand.register_subcommand(commands_parser )
    TestCommand.register_subcommand(commands_parser )
    RunBeamCommand.register_subcommand(commands_parser )
    DummyDataCommand.register_subcommand(commands_parser )
    # Parse args
    args , unknown_args = parser.parse_known_args()
    if not hasattr(args , 'func' ):
        parser.print_help()
        exit(1 )
    kwargs = parse_unknown_args(unknown_args )
    # Run
    service = args.func(args , **kwargs )
    service.run()
if __name__ == "__main__":
main() | 331 |
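# ---------------------------------------------------------------------------
# A minimal sketch of the subcommand pattern the CLI above relies on: each
# command registers a subparser and stores a factory in `func` via
# set_defaults, which main() later calls. Command and option names are made up.
# ---------------------------------------------------------------------------
from argparse import ArgumentParser

class GreetCommand:
    def __init__(self, args):
        self.name = args.name
    @staticmethod
    def register_subcommand(subparsers):
        greet_parser = subparsers.add_parser('greet', help='print a greeting')
        greet_parser.add_argument('--name', default='world')
        greet_parser.set_defaults(func=GreetCommand)   # surfaces as args.func
    def run(self):
        print(f'hello, {self.name}')

def demo_main(argv):
    parser = ArgumentParser('demo-cli', usage='demo-cli <command> [<args>]')
    subparsers = parser.add_subparsers(help='demo-cli command helpers')
    GreetCommand.register_subcommand(subparsers)
    args = parser.parse_args(argv)
    if not hasattr(args, 'func'):
        parser.print_help()
        return
    service = args.func(args)   # same shape as `args.func(args, **kwargs)` above
    service.run()

demo_main(['greet', '--name', 'datasets'])   # prints: hello, datasets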
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class _lowerCamelCase :
'''simple docstring'''
def __init__( self : Optional[Any] , _A : Optional[int] , _A : Union[str, Any]=13 , _A : Optional[int]=7 , _A : int=True , _A : Union[str, Any]=True , _A : Tuple=True , _A : Dict=True , _A : int=99 , _A : str=32 , _A : List[Any]=2 , _A : Any=4 , _A : List[str]=37 , _A : List[str]="gelu" , _A : Any=0.1 , _A : List[str]=0.1 , _A : Optional[Any]=512 , _A : str=16 , _A : Union[str, Any]=2 , _A : List[Any]=0.02 , _A : Any=3 , _A : str=4 , _A : int=None , ) -> int:
__magic_name__ : str = parent
__magic_name__ : List[Any] = 13
__magic_name__ : Union[str, Any] = 7
__magic_name__ : Tuple = True
__magic_name__ : Dict = True
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = True
__magic_name__ : int = 99
__magic_name__ : List[str] = 384
__magic_name__ : Optional[int] = 2
__magic_name__ : List[Any] = 4
__magic_name__ : int = 37
__magic_name__ : Union[str, Any] = 'gelu'
__magic_name__ : Optional[int] = 0.1
__magic_name__ : str = 0.1
__magic_name__ : Optional[Any] = 512
__magic_name__ : Any = 16
__magic_name__ : Union[str, Any] = 2
__magic_name__ : Any = 0.02
__magic_name__ : List[str] = 3
__magic_name__ : Tuple = 4
__magic_name__ : List[Any] = 128
__magic_name__ : Optional[Any] = 2
__magic_name__ : List[str] = 9
__magic_name__ : str = 1
__magic_name__ : List[str] = None
def __lowerCAmelCase ( self : List[str] ) -> List[str]:
__magic_name__ : Tuple = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : Optional[Any] = None
if self.use_input_mask:
__magic_name__ : str = random_attention_mask([self.batch_size, self.seq_length] )
__magic_name__ : List[str] = None
if self.use_token_type_ids:
__magic_name__ : int = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
__magic_name__ : int = None
if self.use_labels:
__magic_name__ : int = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Any = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : int = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Optional[Any] = ConvBertConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , return_dict=_A , )
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : int , _A : int , _A : str , _A : Union[str, Any] , _A : List[str] , _A : Tuple , _A : int , _A : Union[str, Any] ) -> Any:
__magic_name__ : Dict = TFConvBertModel(config=_A )
__magic_name__ : int = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
__magic_name__ : Any = [input_ids, input_mask]
__magic_name__ : Tuple = model(_A )
__magic_name__ : List[Any] = model(_A )
self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
def __lowerCAmelCase ( self : int , _A : str , _A : Dict , _A : Dict , _A : Dict , _A : Any , _A : Optional[int] , _A : int ) -> Optional[Any]:
__magic_name__ : Dict = TFConvBertForMaskedLM(config=_A )
__magic_name__ : Union[str, Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Dict = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : Optional[int] , _A : str , _A : Union[str, Any] , _A : Tuple , _A : Dict , _A : Dict , _A : Union[str, Any] , _A : Dict ) -> Tuple:
__magic_name__ : Any = self.num_labels
__magic_name__ : str = TFConvBertForSequenceClassification(config=_A )
__magic_name__ : List[Any] = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
def __lowerCAmelCase ( self : int , _A : Dict , _A : Tuple , _A : str , _A : str , _A : int , _A : List[Any] , _A : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = self.num_choices
__magic_name__ : Optional[int] = TFConvBertForMultipleChoice(config=_A )
__magic_name__ : Union[str, Any] = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : str = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Tuple = tf.tile(tf.expand_dims(_A , 1 ) , (1, self.num_choices, 1) )
__magic_name__ : Optional[int] = {
'input_ids': multiple_choice_inputs_ids,
'attention_mask': multiple_choice_input_mask,
'token_type_ids': multiple_choice_token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
def __lowerCAmelCase ( self : List[Any] , _A : int , _A : List[str] , _A : int , _A : Tuple , _A : List[str] , _A : Any , _A : Optional[int] ) -> List[Any]:
__magic_name__ : List[Any] = self.num_labels
__magic_name__ : Union[str, Any] = TFConvBertForTokenClassification(config=_A )
__magic_name__ : Dict = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Any = model(_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Tuple , _A : List[Any] , _A : Optional[int] , _A : Tuple , _A : str , _A : List[str] ) -> int:
__magic_name__ : Dict = TFConvBertForQuestionAnswering(config=_A )
__magic_name__ : int = {
'input_ids': input_ids,
'attention_mask': input_mask,
'token_type_ids': token_type_ids,
}
__magic_name__ : Union[str, Any] = model(_A )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_tf
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : Optional[int] = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
A_ : List[str] = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
A_ : Tuple = False
A_ : Any = False
A_ : List[Any] = False
def __lowerCAmelCase ( self : List[Any] ) -> int:
__magic_name__ : Optional[Any] = TFConvBertModelTester(self )
__magic_name__ : List[Any] = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : str ) -> Dict:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
__magic_name__ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_A )
def __lowerCAmelCase ( self : List[Any] ) -> Dict:
__magic_name__ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[int]:
__magic_name__ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Union[str, Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : int ) -> Any:
__magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : Dict ) -> List[str]:
__magic_name__ , __magic_name__ : int = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : Optional[int] = True
__magic_name__ : Any = True
if hasattr(_A , 'use_cache' ):
__magic_name__ : List[Any] = True
__magic_name__ : str = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : Optional[Any] = getattr(self.model_tester , 'key_length' , _A )
for model_class in self.all_model_classes:
__magic_name__ : List[str] = self._prepare_for_class(_A , _A )
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Tuple = len(model(_A ) )
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_A , saved_model=_A )
__magic_name__ : Union[str, Any] = os.path.join(_A , 'saved_model' , '1' )
__magic_name__ : Optional[int] = tf.keras.models.load_model(_A )
__magic_name__ : Optional[Any] = model(_A )
if self.is_encoder_decoder:
__magic_name__ : Optional[int] = outputs['encoder_hidden_states']
__magic_name__ : Tuple = outputs['encoder_attentions']
else:
__magic_name__ : Union[str, Any] = outputs['hidden_states']
__magic_name__ : Optional[Any] = outputs['attentions']
self.assertEqual(len(_A ) , _A )
__magic_name__ : Optional[Any] = getattr(
self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
self.assertEqual(len(_A ) , _A )
self.assertListEqual(
list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
def __lowerCAmelCase ( self : Union[str, Any] ) -> Any:
__magic_name__ : Optional[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
self.assertIsNotNone(_A )
def __lowerCAmelCase ( self : List[str] ) -> Any:
__magic_name__ , __magic_name__ : Tuple = self.model_tester.prepare_config_and_inputs_for_common()
__magic_name__ : str = True
__magic_name__ : Optional[int] = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
__magic_name__ : List[Any] = getattr(self.model_tester , 'key_length' , _A )
__magic_name__ : Optional[int] = getattr(self.model_tester , 'key_length' , _A )
def check_decoder_attentions_output(_A : List[Any] ):
__magic_name__ : Tuple = len(_A )
self.assertEqual(out_len % 2 , 0 )
__magic_name__ : Any = outputs.decoder_attentions
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
def check_encoder_attentions_output(_A : int ):
__magic_name__ : Dict = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(_A ) , self.model_tester.num_hidden_layers )
self.assertListEqual(
list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
for model_class in self.all_model_classes:
__magic_name__ : Union[str, Any] = True
__magic_name__ : Tuple = False
__magic_name__ : List[str] = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
__magic_name__ : Tuple = len(_A )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
if self.is_encoder_decoder:
__magic_name__ : Any = model_class(_A )
__magic_name__ : Any = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_decoder_attentions_output(_A )
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
__magic_name__ : Optional[int] = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : Optional[int] = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
# Check attention is always last and order is fine
__magic_name__ : str = True
__magic_name__ : str = True
__magic_name__ : Optional[int] = model_class(_A )
__magic_name__ : str = model(self._prepare_for_class(_A , _A ) )
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(_A ) )
self.assertEqual(model.config.output_hidden_states , _A )
check_encoder_attentions_output(_A )
@require_tf
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def __lowerCAmelCase ( self : int ) -> int:
__magic_name__ : List[Any] = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
__magic_name__ : Union[str, Any] = tf.constant([[0, 1, 2, 3, 4, 5]] )
__magic_name__ : Tuple = model(_A )[0]
__magic_name__ : str = [1, 6, 768]
self.assertEqual(output.shape , _A )
__magic_name__ : Tuple = tf.constant(
[
[
[-0.0347_5493, -0.468_6034, -0.3063_8832],
[0.2263_7248, -0.2698_8646, -0.742_3424],
[0.1032_4868, -0.4501_3508, -0.5828_0784],
]
] )
tf.debugging.assert_near(output[:, :3, :3] , _A , atol=1E-4 ) | 331 | 1 |
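# ---------------------------------------------------------------------------
# A tiny sketch of the two checks the tests above repeat everywhere: assert
# the output shape, then compare activations numerically with a tolerance. A
# toy Keras model stands in for ConvBERT so the snippet runs without any
# pretrained checkpoint.
# ---------------------------------------------------------------------------
import tensorflow as tf

batch_size, seq_length, hidden_size, vocab_size = 2, 6, 8, 50
toy_model = tf.keras.Sequential([
    tf.keras.layers.Embedding(vocab_size, hidden_size),    # (batch, seq) -> (batch, seq, hidden)
    tf.keras.layers.Dense(hidden_size, activation='gelu'),
])
input_ids = tf.random.uniform((batch_size, seq_length), maxval=vocab_size, dtype=tf.int32)
hidden_states = toy_model(input_ids)
assert hidden_states.shape == (batch_size, seq_length, hidden_size)

# A second forward pass with the same weights must match within tolerance,
# the same recipe as the assert_near integration check above.
tf.debugging.assert_near(toy_model(input_ids), hidden_states, atol=1E-4)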
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_xlnet import XLNetTokenizer
else:
lowerCAmelCase :Union[str, Any] = None
lowerCAmelCase :List[Any] = logging.get_logger(__name__)
lowerCAmelCase :int = {'''vocab_file''': '''spiece.model''', '''tokenizer_file''': '''tokenizer.json'''}
lowerCAmelCase :str = {
'''vocab_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/spiece.model''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/spiece.model''',
},
'''tokenizer_file''': {
'''xlnet-base-cased''': '''https://huggingface.co/xlnet-base-cased/resolve/main/tokenizer.json''',
'''xlnet-large-cased''': '''https://huggingface.co/xlnet-large-cased/resolve/main/tokenizer.json''',
},
}
lowerCAmelCase :str = {
'''xlnet-base-cased''': None,
'''xlnet-large-cased''': None,
}
lowerCAmelCase :List[Any] = '''▁'''
# Segments (not really needed)
lowerCAmelCase :Optional[Any] = 0
lowerCAmelCase :Any = 1
lowerCAmelCase :Optional[Any] = 2
lowerCAmelCase :Optional[Any] = 3
lowerCAmelCase :Optional[Any] = 4
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : List[str] = VOCAB_FILES_NAMES
A_ : Optional[Any] = PRETRAINED_VOCAB_FILES_MAP
A_ : Union[str, Any] = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
A_ : Union[str, Any] = """left"""
A_ : Union[str, Any] = XLNetTokenizer
def __init__( self : Tuple , _A : Dict=None , _A : Optional[Any]=None , _A : Optional[Any]=False , _A : int=True , _A : Optional[Any]=False , _A : Dict="<s>" , _A : List[Any]="</s>" , _A : List[str]="<unk>" , _A : int="<sep>" , _A : Dict="<pad>" , _A : List[str]="<cls>" , _A : Tuple="<mask>" , _A : int=["<eop>", "<eod>"] , **_A : Optional[int] , ) -> List[str]:
# Mask token behave like a normal word, i.e. include the space before it
__magic_name__ : Any = AddedToken(_A , lstrip=_A , rstrip=_A ) if isinstance(_A , _A ) else mask_token
super().__init__(
vocab_file=_A , tokenizer_file=_A , do_lower_case=_A , remove_space=_A , keep_accents=_A , bos_token=_A , eos_token=_A , unk_token=_A , sep_token=_A , pad_token=_A , cls_token=_A , mask_token=_A , additional_special_tokens=_A , **_A , )
__magic_name__ : str = 3
__magic_name__ : Tuple = do_lower_case
__magic_name__ : Union[str, Any] = remove_space
__magic_name__ : List[Any] = keep_accents
__magic_name__ : Union[str, Any] = vocab_file
__magic_name__ : int = False if not self.vocab_file else True
def __lowerCAmelCase ( self : Union[str, Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
__magic_name__ : List[str] = [self.sep_token_id]
__magic_name__ : List[Any] = [self.cls_token_id]
if token_ids_a is None:
return token_ids_a + sep + cls
return token_ids_a + sep + token_ids_a + sep + cls
def __lowerCAmelCase ( self : Optional[Any] , _A : List[int] , _A : Optional[List[int]] = None ) -> List[int]:
__magic_name__ : int = [self.sep_token_id]
__magic_name__ : Union[str, Any] = [2]
if token_ids_a is None:
return len(token_ids_a + sep ) * [0] + cls_segment_id
return len(token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1] + cls_segment_id
def __lowerCAmelCase ( self : str , _A : str , _A : Optional[str] = None ) -> Tuple[str]:
if not self.can_save_slow_tokenizer:
raise ValueError(
'Your fast tokenizer does not have the necessary information to save the vocabulary for a slow '
'tokenizer.' )
if not os.path.isdir(_A ):
logger.error(F'Vocabulary path ({save_directory}) should be a directory' )
return
__magic_name__ : Tuple = os.path.join(
_A , (filename_prefix + '-' if filename_prefix else '') + VOCAB_FILES_NAMES['vocab_file'] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_A ):
copyfile(self.vocab_file , _A )
return (out_vocab_file,) | 331 |
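# ---------------------------------------------------------------------------
# A pure-Python sketch of the special-token layout implemented above: XLNet
# appends <sep>/<cls> at the *end* of the sequence, and the <cls> position
# gets its own segment id (2). The token id values here are illustrative.
# ---------------------------------------------------------------------------
SEP_ID, CLS_ID, CLS_SEGMENT_ID = 4, 3, 2

def build_inputs_with_special_tokens(token_ids_a, token_ids_b=None):
    if token_ids_b is None:
        return token_ids_a + [SEP_ID, CLS_ID]
    return token_ids_a + [SEP_ID] + token_ids_b + [SEP_ID, CLS_ID]

def create_token_type_ids(token_ids_a, token_ids_b=None):
    if token_ids_b is None:
        return [0] * (len(token_ids_a) + 1) + [CLS_SEGMENT_ID]
    return [0] * (len(token_ids_a) + 1) + [1] * (len(token_ids_b) + 1) + [CLS_SEGMENT_ID]

ids = build_inputs_with_special_tokens([10, 11], [20, 21, 22])
segments = create_token_type_ids([10, 11], [20, 21, 22])
assert len(ids) == len(segments) == 8
print(ids)        # [10, 11, 4, 20, 21, 22, 4, 3]
print(segments)   # [0, 0, 0, 1, 1, 1, 1, 2]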
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
lowerCAmelCase :Dict = pytest.mark.integration
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[Any] ) -> Union[str, Any]:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
return dset
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
__magic_name__ : Union[str, Any] = dset.map(
lambda _A , _A : {"vecs": i * np.ones(5 , dtype=np.floataa )} , with_indices=_A , keep_in_memory=_A )
__magic_name__ : int = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
__magic_name__ , __magic_name__ : List[str] = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
dset.drop_index('vecs' )
def __lowerCAmelCase ( self : Any ) -> str:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
__magic_name__ , __magic_name__ : Any = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Tuple ) -> int:
import faiss
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
dset.save_faiss_index('vecs' , tmp_file.name )
dset.load_faiss_index('vecs2' , tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ , __magic_name__ : Dict = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.floataa ) )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Dataset = self._create_dummy_dataset()
dset.add_faiss_index_from_external_arrays(
external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
dset.drop_index('vecs' )
self.assertRaises(_A , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.floataa ) ) )
def __lowerCAmelCase ( self : List[Any] ) -> Tuple:
from elasticsearch import Elasticsearch
__magic_name__ : Dataset = self._create_dummy_dataset()
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : int = {'acknowledged': True}
            mocked_bulk.return_value = [(True, None)] * 30  # configure the mock's return value (assignment, not a call)
__magic_name__ : List[Any] = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
__magic_name__ : Union[str, Any] = Elasticsearch()
dset.add_elasticsearch_index('filename' , es_client=_A )
__magic_name__ , __magic_name__ : Tuple = dset.get_nearest_examples('filename' , 'my_name-train_29' )
self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> List[Any]:
import faiss
__magic_name__ : int = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
# add vectors
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsNotNone(index.faiss_index )
self.assertEqual(index.faiss_index.ntotal , 5 )
index.add_vectors(np.zeros((5, 5) , dtype=np.floataa ) )
self.assertEqual(index.faiss_index.ntotal , 10 )
# single query
__magic_name__ : str = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Optional[int] = 1
__magic_name__ , __magic_name__ : str = index.search(_A )
self.assertRaises(_A , index.search , query.reshape(-1 , 1 ) )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
# batched queries
__magic_name__ : Optional[Any] = np.eye(5 , dtype=np.floataa )[::-1]
__magic_name__ , __magic_name__ : str = index.search_batch(_A )
self.assertRaises(_A , index.search_batch , queries[0] )
__magic_name__ : List[Any] = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([4, 3, 2, 1, 0] , _A )
def __lowerCAmelCase ( self : Dict ) -> Optional[Any]:
import faiss
__magic_name__ : str = FaissIndex(string_factory='Flat' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
__magic_name__ : str = FaissIndex(string_factory='LSH' )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
with self.assertRaises(_A ):
__magic_name__ : Dict = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Dict:
import faiss
__magic_name__ : Any = faiss.IndexFlat(5 )
__magic_name__ : Optional[Any] = FaissIndex(custom_index=_A )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
def __lowerCAmelCase ( self : Dict ) -> Tuple:
import faiss
__magic_name__ : Optional[int] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
# Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
# ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
# see https://bugs.python.org/issue14243 and
# https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
with tempfile.NamedTemporaryFile(delete=_A ) as tmp_file:
index.save(tmp_file.name )
__magic_name__ : Optional[int] = FaissIndex.load(tmp_file.name )
os.unlink(tmp_file.name )
__magic_name__ : Dict = np.zeros(5 , dtype=np.floataa )
__magic_name__ : Tuple = 1
__magic_name__ , __magic_name__ : Optional[Any] = index.search(_A )
self.assertGreater(scores[0] , 0 )
self.assertEqual(indices[0] , 1 )
@require_faiss
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
import faiss
__magic_name__ : Union[str, Any] = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
index.add_vectors(np.eye(5 , dtype=np.floataa ) )
__magic_name__ : Dict = 'index.faiss'
__magic_name__ : Optional[Any] = f'mock://{index_name}'
index.save(lowerCAmelCase , storage_options=mockfs.storage_options )
__magic_name__ : Tuple = FaissIndex.load(lowerCAmelCase , storage_options=mockfs.storage_options )
__magic_name__ : Union[str, Any] = np.zeros(5 , dtype=np.floataa )
__magic_name__ : List[str] = 1
__magic_name__ , __magic_name__ : Dict = index.search(lowerCAmelCase )
assert scores[0] > 0
assert indices[0] == 1
@require_elasticsearch
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __lowerCAmelCase ( self : Tuple ) -> Dict:
from elasticsearch import Elasticsearch
with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
__magic_name__ : Any = Elasticsearch()
__magic_name__ : Union[str, Any] = {'acknowledged': True}
__magic_name__ : Tuple = ElasticSearchIndex(es_client=_A )
            mocked_bulk.return_value = [(True, None)] * 3  # configure the mock's return value (assignment, not a call)
index.add_documents(['foo', 'bar', 'foobar'] )
# single query
__magic_name__ : str = 'foo'
__magic_name__ : str = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# single query with timeout
__magic_name__ : str = 'foo'
__magic_name__ : Dict = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
__magic_name__ , __magic_name__ : Dict = index.search(_A , request_timeout=30 )
self.assertEqual(scores[0] , 1 )
self.assertEqual(indices[0] , 0 )
# batched queries
__magic_name__ : Optional[Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Optional[Any] = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Optional[Any] = index.search_batch(_A )
__magic_name__ : Tuple = [scores[0] for scores in total_scores]
__magic_name__ : List[str] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A )
# batched queries with timeout
__magic_name__ : Union[str, Any] = ['foo', 'bar', 'foobar']
__magic_name__ : Tuple = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
__magic_name__ , __magic_name__ : Dict = index.search_batch(_A , request_timeout=30 )
__magic_name__ : Optional[int] = [scores[0] for scores in total_scores]
__magic_name__ : Union[str, Any] = [indices[0] for indices in total_indices]
self.assertGreater(np.min(_A ) , 0 )
self.assertListEqual([1, 1, 1] , _A ) | 331 | 1 |
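# ---------------------------------------------------------------------------
# A compact, runnable version of the FAISS round trip tested above (needs the
# faiss-cpu package). Inner-product search over one-hot rows, so the nearest
# neighbour of the query e_1 is row 1, mirroring the assertions above.
# ---------------------------------------------------------------------------
import faiss
import numpy as np

dim = 5
index = faiss.IndexFlatIP(dim)                     # METRIC_INNER_PRODUCT
index.add(np.eye(dim, dtype=np.float32))           # rows 0..4: standard basis vectors
index.add(np.zeros((dim, dim), dtype=np.float32))  # rows 5..9: never the best match
assert index.ntotal == 10

query = np.zeros((1, dim), dtype=np.float32)
query[0, 1] = 1.0
scores, indices = index.search(query, 1)           # top-1 neighbour per query row
assert indices[0, 0] == 1 and scores[0, 0] > 0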
'''simple docstring'''
from sklearn.metrics import mean_squared_error
import datasets
lowerCAmelCase :int = '''\
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
lowerCAmelCase :int = '''\
Mean Squared Error(MSE) is the average of the square of difference between the predicted
and actual values.
'''
lowerCAmelCase :Any = '''
Args:
predictions: array-like of shape (n_samples,) or (n_samples, n_outputs)
Estimated target values.
references: array-like of shape (n_samples,) or (n_samples, n_outputs)
Ground truth (correct) target values.
sample_weight: array-like of shape (n_samples,), default=None
Sample weights.
multioutput: {"raw_values", "uniform_average"} or array-like of shape (n_outputs,), default="uniform_average"
Defines aggregating of multiple output values. Array-like value defines weights used to average errors.
"raw_values" : Returns a full set of errors in case of multioutput input.
"uniform_average" : Errors of all outputs are averaged with uniform weight.
squared : bool, default=True
If True returns MSE value, if False returns RMSE (Root Mean Squared Error) value.
Returns:
mse : mean squared error.
Examples:
>>> mse_metric = datasets.load_metric("mse")
>>> predictions = [2.5, 0.0, 2, 8]
>>> references = [3, -0.5, 2, 7]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.375}
>>> rmse_result = mse_metric.compute(predictions=predictions, references=references, squared=False)
>>> print(rmse_result)
{\'mse\': 0.6123724356957945}
If you\'re using multi-dimensional lists, then set the config as follows :
>>> mse_metric = datasets.load_metric("mse", "multilist")
>>> predictions = [[0.5, 1], [-1, 1], [7, -6]]
>>> references = [[0, 2], [-1, 2], [8, -5]]
>>> results = mse_metric.compute(predictions=predictions, references=references)
>>> print(results)
{\'mse\': 0.7083333333333334}
>>> results = mse_metric.compute(predictions=predictions, references=references, multioutput=\'raw_values\')
>>> print(results) # doctest: +NORMALIZE_WHITESPACE
{\'mse\': array([0.41666667, 1. ])}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class _lowerCamelCase ( datasets.Metric ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(self._get_feature_types() ) , reference_urls=[
'https://scikit-learn.org/stable/modules/generated/sklearn.metrics.mean_squared_error.html'
] , )
def __lowerCAmelCase ( self : List[Any] ) -> List[Any]:
if self.config_name == "multilist":
return {
"predictions": datasets.Sequence(datasets.Value('float' ) ),
"references": datasets.Sequence(datasets.Value('float' ) ),
}
else:
return {
"predictions": datasets.Value('float' ),
"references": datasets.Value('float' ),
}
def __lowerCAmelCase ( self : str , _A : Dict , _A : List[str] , _A : str=None , _A : Optional[Any]="uniform_average" , _A : Optional[int]=True ) -> Optional[int]:
        mse = mean_squared_error(
_A , _A , sample_weight=_A , multioutput=_A , squared=_A )
return {"mse": mse} | 331 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase ( lowerCAmelCase : Tuple ):
"""simple docstring"""
__magic_name__ : List[Any] = filter(lambda lowerCAmelCase : p.requires_grad , model.parameters() )
__magic_name__ : Tuple = sum([np.prod(p.size() ) for p in model_parameters] )
return params
lowerCAmelCase :Union[str, Any] = logging.getLogger(__name__)
def lowerCamelCase ( lowerCAmelCase : List[Any] , lowerCAmelCase : int ):
"""simple docstring"""
if metric == "rouge2":
__magic_name__ : Any = '{val_avg_rouge2:.4f}-{step_count}'
elif metric == "bleu":
__magic_name__ : Optional[Any] = '{val_avg_bleu:.4f}-{step_count}'
elif metric == "em":
__magic_name__ : Dict = '{val_avg_em:.4f}-{step_count}'
elif metric == "loss":
__magic_name__ : int = '{val_avg_loss:.4f}-{step_count}'
else:
raise NotImplementedError(
f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
' function.' )
__magic_name__ : List[Any] = ModelCheckpoint(
dirpath=lowerCAmelCase , filename=lowerCAmelCase , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase ( lowerCAmelCase : Optional[int] , lowerCAmelCase : Optional[Any] ):
"""simple docstring"""
return EarlyStopping(
monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=lowerCAmelCase , verbose=lowerCAmelCase , )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Optional[Any] , _A : List[str] ) -> int:
__magic_name__ : Optional[Any] = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_A )
@rank_zero_only
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule , _A : str , _A : Dict=True ) -> None:
logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
__magic_name__ : List[str] = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
# Log results
__magic_name__ : Optional[Any] = Path(pl_module.hparams.output_dir )
if type_path == "test":
__magic_name__ : List[Any] = od / 'test_results.txt'
__magic_name__ : Dict = od / 'test_generations.txt'
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
__magic_name__ : Dict = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
__magic_name__ : Optional[Any] = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_A )
generations_file.parent.mkdir(exist_ok=_A )
with open(_A , 'a+' ) as writer:
for key in sorted(_A ):
if key in ["log", "progress_bar", "preds"]:
continue
__magic_name__ : Optional[Any] = metrics[key]
if isinstance(_A , torch.Tensor ):
__magic_name__ : Tuple = val.item()
__magic_name__ : int = F'{key}: {val:.6f}\n'
writer.write(_A )
if not save_generations:
return
if "preds" in metrics:
__magic_name__ : str = '\n'.join(metrics['preds'] )
generations_file.open('w+' ).write(_A )
@rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Union[str, Any] , _A : Tuple ) -> Tuple:
try:
__magic_name__ : str = pl_module.model.model.num_parameters()
except AttributeError:
__magic_name__ : List[str] = pl_module.model.num_parameters()
__magic_name__ : List[Any] = count_trainable_parameters(_A )
# mp stands for million parameters
trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
@rank_zero_only
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
return self._write_logs(_A , _A , 'test' )
@rank_zero_only
def __lowerCAmelCase ( self : Tuple , _A : pl.Trainer , _A : Any ) -> List[Any]:
save_json(pl_module.metrics , pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid") | 331 | 1 |
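# ---------------------------------------------------------------------------
# A short sketch of the wiring these helpers produce, assuming the
# pytorch-lightning API used above; 'checkpoints/' and patience=3 are made-up
# example values. The monitored key must match what the LightningModule logs.
# ---------------------------------------------------------------------------
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint

metric = 'rouge2'
checkpoint_callback = ModelCheckpoint(
    dirpath='checkpoints/',
    filename='{val_avg_rouge2:.4f}-{step_count}',  # the template selected per metric above
    monitor=f'val_{metric}',
    mode='max',   # higher ROUGE is better; the 'loss' metric would use mode='min'
    save_top_k=1,
)
early_stopping_callback = EarlyStopping(monitor=f'val_{metric}', mode='max', patience=3, verbose=True)
# trainer = pl.Trainer(callbacks=[checkpoint_callback, early_stopping_callback], ...)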
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
lowerCAmelCase :Any = logging.get_logger(__name__)
lowerCAmelCase :Any = {
'''facebook/dpr-ctx_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-single-nq-base''': (
'''https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/config.json'''
),
'''facebook/dpr-ctx_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-question_encoder-multiset-base''': (
'''https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/config.json'''
),
'''facebook/dpr-reader-multiset-base''': (
'''https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/config.json'''
),
}
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : int = """dpr"""
def __init__( self : Dict , _A : Tuple=30522 , _A : Optional[int]=768 , _A : Any=12 , _A : Optional[Any]=12 , _A : Dict=3072 , _A : Optional[int]="gelu" , _A : Optional[Any]=0.1 , _A : Union[str, Any]=0.1 , _A : Optional[int]=512 , _A : Optional[int]=2 , _A : Optional[int]=0.02 , _A : Tuple=1E-12 , _A : List[str]=0 , _A : Optional[int]="absolute" , _A : int = 0 , **_A : Union[str, Any] , ) -> Optional[int]:
super().__init__(pad_token_id=_A , **_A )
__magic_name__ : Any = vocab_size
__magic_name__ : Union[str, Any] = hidden_size
__magic_name__ : Optional[int] = num_hidden_layers
__magic_name__ : int = num_attention_heads
__magic_name__ : Optional[Any] = hidden_act
__magic_name__ : Union[str, Any] = intermediate_size
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : List[Any] = attention_probs_dropout_prob
__magic_name__ : Any = max_position_embeddings
__magic_name__ : Optional[int] = type_vocab_size
__magic_name__ : List[str] = initializer_range
__magic_name__ : Optional[Any] = layer_norm_eps
__magic_name__ : Union[str, Any] = projection_dim
__magic_name__ : int = position_embedding_type | 331 |
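# ---------------------------------------------------------------------------
# A minimal usage sketch for a config class like the one above, assuming the
# real `transformers.DPRConfig`; the hyperparameter values are arbitrary.
# ---------------------------------------------------------------------------
from transformers import DPRConfig

config = DPRConfig(hidden_size=256, num_hidden_layers=4, num_attention_heads=4, projection_dim=128)
print(config.model_type)                # 'dpr'
config.save_pretrained('./dpr-tiny')    # serializes the settings to config.json
reloaded = DPRConfig.from_pretrained('./dpr-tiny')
assert reloaded.projection_dim == 128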
'''simple docstring'''
def lowerCamelCase ( ):
"""simple docstring"""
return 1
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else five_pence(x - 5 ) + two_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int ):
"""simple docstring"""
return 0 if x < 0 else two_pound(x - 200 ) + one_pound(lowerCAmelCase )
def lowerCamelCase ( lowerCAmelCase : int = 200 ):
"""simple docstring"""
return two_pound(lowerCAmelCase )
if __name__ == "__main__":
print(solution(int(input().strip()))) | 331 | 1 |
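# ---------------------------------------------------------------------------
# The recursive chain above counts coin combinations one denomination at a
# time; the same count falls out of a standard bottom-up dynamic programme,
# which avoids deep recursion for large targets.
# ---------------------------------------------------------------------------
def count_combinations(target: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * target                     # ways[0] = 1: the empty combination
    for coin in coins:                            # one denomination at a time,
        for amount in range(coin, target + 1):    # so each combination is counted once
            ways[amount] += ways[amount - coin]
    return ways[target]

assert count_combinations(200) == 73682  # Project Euler problem 31
print(count_combinations(200))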
'''simple docstring'''
import copy
from dataclasses import dataclass, field
from typing import ClassVar, Dict
from ..features import ClassLabel, Features, Value
from .base import TaskTemplate
@dataclass(frozen=lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : str = field(default="""text-classification""" , metadata={"""include_in_asdict_even_if_is_default""": True} )
A_ : ClassVar[Features] = Features({"""text""": Value("""string""" )} )
A_ : ClassVar[Features] = Features({"""labels""": ClassLabel} )
A_ : str = "text"
A_ : str = "labels"
    def __lowerCAmelCase ( self : Dict , features : int ) -> Tuple:
        if self.label_column not in features:
            raise ValueError(F'Column {self.label_column} is not present in features.' )
        if not isinstance(features[self.label_column] , ClassLabel ):
            raise ValueError(F'Column {self.label_column} is not a ClassLabel.' )
        task_template = copy.deepcopy(self )
        label_schema = self.label_schema.copy()
        label_schema['labels'] = features[self.label_column]
        task_template.__dict__['label_schema'] = label_schema
        return task_template
@property
def __lowerCAmelCase ( self : Any ) -> Dict[str, str]:
return {
self.text_column: "text",
self.label_column: "labels",
}
'''simple docstring'''
from ..utils import DummyObject, requires_backends
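# Placeholder classes for optional dependencies: instantiating any of them (or
# calling either classmethod, the from_config/from_pretrained hooks in the
# upstream dummies) raises a clear "install flax and transformers" error via
# requires_backends.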
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Union[str, Any] = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Dict = ["""flax""", """transformers"""]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=lowercase__ ):
'''simple docstring'''
A_ : Optional[int] = ["""flax""", """transformers"""]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
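# Lazy-loading package __init__: only the tokenizers and the config entry are
# resolved eagerly; the torch/TF/flax modeling modules are listed in
# _import_structure and imported on first attribute access via _LazyModule below.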
if is_sentencepiece_available():
from ..ta.tokenization_ta import TaTokenizer
else:
from ...utils.dummy_sentencepiece_objects import TaTokenizer
MTaTokenizer = TaTokenizer
if is_tokenizers_available():
from ..ta.tokenization_ta_fast import TaTokenizerFast
else:
from ...utils.dummy_tokenizers_objects import TaTokenizerFast
MTaTokenizerFast = TaTokenizerFast
_import_structure = {"configuration_mt5": ["MT5Config", "MT5OnnxConfig"]}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_import_structure["modeling_mt5"] = [
"MT5EncoderModel",
"MT5ForConditionalGeneration",
"MT5ForQuestionAnswering",
"MT5Model",
"MT5PreTrainedModel",
"MT5Stack",
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["TFMT5EncoderModel", "TFMT5ForConditionalGeneration", "TFMT5Model"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
UpperCAmelCase__ = ["FlaxMT5EncoderModel", "FlaxMT5ForConditionalGeneration", "FlaxMT5Model"]
if TYPE_CHECKING:
from .configuration_mta import MTaConfig, MTaOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mta import (
MTaEncoderModel,
MTaForConditionalGeneration,
MTaForQuestionAnswering,
MTaModel,
MTaPreTrainedModel,
MTaStack,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mta import TFMTaEncoderModel, TFMTaForConditionalGeneration, TFMTaModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mta import FlaxMTaEncoderModel, FlaxMTaForConditionalGeneration, FlaxMTaModel
else:
import sys
sys.modules[__name__] = _LazyModule(
__name__,
globals()["__file__"],
_import_structure,
extra_objects={"MT5Tokenizer": MTaTokenizer, "MT5TokenizerFast": MTaTokenizerFast},
module_spec=__spec__,
)
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase :Tuple = logging.get_logger(__name__)
@add_end_docstrings(lowercase__ )
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
def __init__( self : Optional[Any] , *_A : Optional[Any] , **_A : List[Any] ) -> Any:
super().__init__(*_A , **_A )
requires_backends(self , 'vision' )
self.check_model_type(
TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def __lowerCAmelCase ( self : str , prompt : Any = None , generate_kwargs : Union[str, Any] = None , max_new_tokens : Union[str, Any] = None ) -> List[str]:
        preprocess_params = {}
        forward_kwargs = {}
        if prompt is not None:
            preprocess_params['prompt'] = prompt
        if generate_kwargs is not None:
            forward_kwargs['generate_kwargs'] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['generate_kwargs'] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one' )
            forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
def __call__( self : Optional[Any] , _A : Union[str, List[str], "Image.Image", List["Image.Image"]] , **_A : List[Any] ) -> int:
return super().__call__(_A , **_A )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : Optional[int]=None ) -> Dict:
__magic_name__ : List[Any] = load_image(_A )
if prompt is not None:
if not isinstance(_A , _A ):
raise ValueError(
F'Received an invalid text input, got - {type(_A )} - but expected a single string. '
'Note also that one single text can be provided for conditional image to text generation.' )
__magic_name__ : Any = self.model.config.model_type
if model_type == "git":
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(text=_A , add_special_tokens=_A ).input_ids
__magic_name__ : str = [self.tokenizer.cls_token_id] + input_ids
__magic_name__ : List[Any] = torch.tensor(_A ).unsqueeze(0 )
model_inputs.update({'input_ids': input_ids} )
elif model_type == "pix2struct":
__magic_name__ : Dict = self.image_processor(images=_A , header_text=_A , return_tensors=self.framework )
elif model_type != "vision-encoder-decoder":
# vision-encoder-decoder does not support conditional generation
__magic_name__ : int = self.image_processor(images=_A , return_tensors=self.framework )
__magic_name__ : List[str] = self.tokenizer(_A , return_tensors=self.framework )
model_inputs.update(_A )
else:
raise ValueError(F'Model type {model_type} does not support conditional text generation' )
else:
__magic_name__ : Optional[Any] = self.image_processor(images=_A , return_tensors=self.framework )
if self.model.config.model_type == "git" and prompt is None:
__magic_name__ : int = None
return model_inputs
def __lowerCAmelCase ( self : List[Any] , _A : Tuple , _A : List[str]=None ) -> Any:
# Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch mode, the
# pipeline will group them into a list of `None`, which fails `_forward`. Avoid this by checking it first.
if (
"input_ids" in model_inputs
and isinstance(model_inputs['input_ids'] , _A )
and all(x is None for x in model_inputs['input_ids'] )
):
__magic_name__ : str = None
if generate_kwargs is None:
__magic_name__ : Optional[int] = {}
# FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
# parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
# the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
# in the `_prepare_model_inputs` method.
__magic_name__ : Optional[Any] = model_inputs.pop(self.model.main_input_name )
__magic_name__ : Union[str, Any] = self.model.generate(_A , **_A , **_A )
return model_outputs
def __lowerCAmelCase ( self : List[str] , _A : Tuple ) -> Optional[Any]:
__magic_name__ : Optional[Any] = []
for output_ids in model_outputs:
__magic_name__ : Union[str, Any] = {
'generated_text': self.tokenizer.decode(
_A , skip_special_tokens=_A , )
}
records.append(_A )
return records
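# A minimal usage sketch (the checkpoint is one public example, not a
# requirement of this pipeline, and the generated caption will vary by model):
#
#     from transformers import pipeline
#     captioner = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
#     captioner("http://images.cocodataset.org/val2017/000000039769.jpg")
#     # -> [{'generated_text': '...'}]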
'''simple docstring'''
import json
import sys
import tempfile
import unittest
from pathlib import Path
import transformers
from transformers import (
CONFIG_MAPPING,
FEATURE_EXTRACTOR_MAPPING,
AutoConfig,
AutoFeatureExtractor,
WavaVecaConfig,
WavaVecaFeatureExtractor,
)
from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, get_tests_dir
sys.path.append(str(Path(__file__).parent.parent.parent.parent / 'utils'))
from test_module.custom_configuration import CustomConfig # noqa E402
from test_module.custom_feature_extraction import CustomFeatureExtractor # noqa E402
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_tests_dir('fixtures')
SCREAMING_SNAKE_CASE_: List[Any] =get_tests_dir('fixtures/dummy_feature_extractor_config.json')
SCREAMING_SNAKE_CASE_: Union[str, Any] =get_tests_dir('fixtures/dummy-config.json')
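# The tests below exercise the main AutoFeatureExtractor entry points: loading by
# hub id, from a local directory or bare config.json, error messages for invalid
# identifiers, trust_remote_code behaviour, and registering custom classes.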
class __A ( unittest.TestCase ):
def _lowercase (self : List[Any] ):
UpperCAmelCase_ = 0
def _lowercase (self : Any ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained("facebook/wav2vec2-base-960h" )
self.assertIsInstance(__a , __a )
def _lowercase (self : Any ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
with tempfile.TemporaryDirectory() as tmpdirname:
model_config = WavaVecaConfig()
# remove feature_extractor_type to make sure config.json alone is enough to load feature processor locally
config_dict = AutoFeatureExtractor.from_pretrained(__a ).to_dict()
config_dict.pop("feature_extractor_type" )
config = WavaVecaFeatureExtractor(**config_dict )
# save in new folder
model_config.save_pretrained(tmpdirname )
config.save_pretrained(tmpdirname )
config = AutoFeatureExtractor.from_pretrained(tmpdirname )
# make sure private variable is not incorrectly saved
dict_as_saved = json.loads(config.to_json_string() )
self.assertTrue("_processor_class" not in dict_as_saved )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
def _lowercase (self : Dict ):
with self.assertRaisesRegex(
__a , "bert-base is not a local folder and is not a valid model identifier" ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained("bert-base" )
def _lowercase (self : List[str] ):
with self.assertRaisesRegex(
__a , r"aaaaaa is not a valid git identifier \(branch name, tag name or commit id\)" ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a , revision="aaaaaa" )
def _lowercase (self : List[str] ):
with self.assertRaisesRegex(
__a , "hf-internal-testing/config-no-model does not appear to have a file named preprocessor_config.json." , ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained("hf-internal-testing/config-no-model" )
def _lowercase (self : int ):
# If remote code is not set, we will time out when asking whether to load the model.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
# If remote code is disabled, we can't load this config.
with self.assertRaises(__a ):
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
# Test feature extractor can be reloaded.
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a , trust_remote_code=__a )
self.assertEqual(reloaded_feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
def _lowercase (self : Union[str, Any] ):
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# Trying to register something existing in the Transformers library will raise an error
with self.assertRaises(__a ):
AutoFeatureExtractor.register(__a , __a )
# Now that the config is registered, it can be used as any other config with the auto-API
UpperCAmelCase_ = CustomFeatureExtractor.from_pretrained(__a )
with tempfile.TemporaryDirectory() as tmp_dir:
feature_extractor.save_pretrained(__a )
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(__a )
self.assertIsInstance(__a , __a )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
def _lowercase (self : List[Any] ):
class __A ( UpperCamelCase__ ):
a__ : Optional[Any] = True
try:
AutoConfig.register("custom" , __a )
AutoFeatureExtractor.register(__a , __a )
# If remote code is not set, the default is to use local
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote code is disabled, we load the local one.
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(feature_extractor.is_local )
# If remote is enabled, we load from the Hub
UpperCAmelCase_ = AutoFeatureExtractor.from_pretrained(
"hf-internal-testing/test_dynamic_feature_extractor" , trust_remote_code=__a )
self.assertEqual(feature_extractor.__class__.__name__ , "NewFeatureExtractor" )
self.assertTrue(not hasattr(__a , "is_local" ) )
finally:
if "custom" in CONFIG_MAPPING._extra_content:
del CONFIG_MAPPING._extra_content["custom"]
if CustomConfig in FEATURE_EXTRACTOR_MAPPING._extra_content:
del FEATURE_EXTRACTOR_MAPPING._extra_content[CustomConfig]
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
lowerCAmelCase :Dict = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
lowerCAmelCase :str = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
'''summarization''': AutoModelForSeqaSeqLM,
'''translation''': AutoModelForSeqaSeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
lowerCAmelCase :Any = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
lowerCAmelCase :Tuple = sorted(arg_to_scheduler.keys())
lowerCAmelCase :Any = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class _lowerCamelCase ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple:
__magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.model
__magic_name__ : int = ['bias', 'LayerNorm.weight']
__magic_name__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ : str = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
__magic_name__ : Tuple = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ : List[str] = optimizer
__magic_name__ : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]:
return self.validation_step(_A , _A )
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any:
return self.validation_end(_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str:
if stage == "test":
__magic_name__ : Any = len(self.test_dataloader().dataset )
else:
__magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A )
__magic_name__ : int = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]:
raise NotImplementedError('You must implement this for your task' )
def __lowerCAmelCase ( self : int ) -> List[str]:
return self.train_loader
def __lowerCAmelCase ( self : Tuple ) -> int:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None:
__magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' )
__magic_name__ : List[Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple:
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A )
parser.add_argument('--train_batch_size' , default=32 , type=_A )
parser.add_argument('--eval_batch_size' , default=32 , type=_A )
parser.add_argument('--adafactor' , action='store_true' )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
): # we initialize the retriever only on the master worker with RAY; in newer pytorch-lightning versions, accelerators were removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class _lowerCamelCase ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler']
__magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]:
rank_zero_info('***** Validation results *****' )
__magic_name__ : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]:
rank_zero_info('***** Test results *****' )
__magic_name__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
__magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_A , 'w' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def lowerCamelCase ( lowerCAmelCase : str , lowerCAmelCase : Union[str, Any] ):
"""simple docstring"""
parser.add_argument(
'--output_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'model_checkpoints' ) , type=lowerCAmelCase , help='The output directory where the model predictions and checkpoints will be written.' , )
parser.add_argument(
'--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
parser.add_argument(
'--fp16_opt_level' , type=lowerCAmelCase , default='O2' , help=(
'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
'See details at https://nvidia.github.io/apex/amp.html'
) , )
parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=lowerCAmelCase )
parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=lowerCAmelCase , help='Max gradient norm' )
parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
parser.add_argument(
'--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=lowerCAmelCase , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
parser.add_argument('--seed' , type=lowerCAmelCase , default=42 , help='random seed for initialization' )
parser.add_argument(
'--data_dir' , default=str(Path(lowerCAmelCase ).parent / 'test_run' / 'dummy-train-data' ) , type=lowerCAmelCase , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def lowerCamelCase ( lowerCAmelCase : BaseTransformer , lowerCAmelCase : argparse.Namespace , lowerCAmelCase : List[Any]=None , lowerCAmelCase : Optional[Any]=True , lowerCAmelCase : Optional[Any]=[] , lowerCAmelCase : Union[str, Any]=None , lowerCAmelCase : Any=None , **lowerCAmelCase : Union[str, Any] , ):
"""simple docstring"""
pl.seed_everything(args.seed )
# init model
__magic_name__ : Any = Path(model.hparams.output_dir )
odir.mkdir(exist_ok=lowerCAmelCase )
# add custom checkpoints
if checkpoint_callback is None:
__magic_name__ : List[Any] = pl.callbacks.ModelCheckpoint(
filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
if early_stopping_callback:
extra_callbacks.append(lowerCAmelCase )
if logging_callback is None:
__magic_name__ : Dict = LoggingCallback()
__magic_name__ : List[str] = {}
if args.fp16:
__magic_name__ : Dict = 16
if args.gpus > 1:
__magic_name__ : Tuple = 'auto'
__magic_name__ : int = 'ddp'
__magic_name__ : str = args.accumulate_grad_batches
__magic_name__ : str = None
__magic_name__ : List[str] = 'auto'
__magic_name__ : List[Any] = pl.Trainer.from_argparse_args(
lowerCAmelCase , weights_summary=lowerCAmelCase , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=lowerCAmelCase , val_check_interval=1 , num_sanity_val_steps=2 , **lowerCAmelCase , )
if args.do_train:
trainer.fit(lowerCAmelCase )
else:
print('RAG modeling tests with new set functions successfully executed!' )
return trainer
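# A typical driver built on the helpers above (hedged sketch: in the upstream
# lightning_base.py the two module-level helpers are named add_generic_args and
# generic_train, and MyTaskModel stands in for a concrete task subclass):
#
#     parser = argparse.ArgumentParser()
#     add_generic_args(parser, os.getcwd())
#     MyTaskModel.add_model_specific_args(parser, os.getcwd())
#     args = parser.parse_args()
#     model = MyTaskModel(args)
#     trainer = generic_train(model, args)
#     if args.do_predict:
#         trainer.test(model)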
'''simple docstring'''
import argparse
import json
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from torchvision import transforms
from transformers import BitImageProcessor, FocalNetConfig, FocalNetForImageClassification
from transformers.image_utils import IMAGENET_DEFAULT_MEAN, IMAGENET_DEFAULT_STD, PILImageResampling
def _SCREAMING_SNAKE_CASE (A ) -> int:
"""simple docstring"""
lowercase__ = [2, 2, 6, 2] if '''tiny''' in model_name else [2, 2, 18, 2]
lowercase__ = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ = True if '''large''' in model_name or '''huge''' in model_name else False
lowercase__ = True if '''large''' in model_name or '''huge''' in model_name else False
if "large" in model_name or "xlarge" in model_name or "huge" in model_name:
if "fl3" in model_name:
lowercase__ = [3, 3, 3, 3]
lowercase__ = [5, 5, 5, 5]
elif "fl4" in model_name:
lowercase__ = [4, 4, 4, 4]
lowercase__ = [3, 3, 3, 3]
if "tiny" in model_name or "small" in model_name or "base" in model_name:
lowercase__ = [3, 3, 3, 3]
if "lrf" in model_name:
lowercase__ = [3, 3, 3, 3]
else:
lowercase__ = [2, 2, 2, 2]
if "tiny" in model_name:
lowercase__ = 96
elif "small" in model_name:
lowercase__ = 96
elif "base" in model_name:
lowercase__ = 128
elif "large" in model_name:
lowercase__ = 192
elif "xlarge" in model_name:
lowercase__ = 256
elif "huge" in model_name:
lowercase__ = 352
# set label information
lowercase__ = '''huggingface/label-files'''
if "large" in model_name or "huge" in model_name:
lowercase__ = '''imagenet-22k-id2label.json'''
else:
lowercase__ = '''imagenet-1k-id2label.json'''
lowercase__ = json.load(open(hf_hub_download(A , A , repo_type='''dataset''' ) , '''r''' ) )
lowercase__ = {int(A ): v for k, v in idalabel.items()}
lowercase__ = {v: k for k, v in idalabel.items()}
lowercase__ = FocalNetConfig(
embed_dim=A , depths=A , focal_levels=A , focal_windows=A , use_conv_embed=A , idalabel=A , labelaid=A , use_post_layernorm=A , use_layerscale=A , )
return config
def _SCREAMING_SNAKE_CASE (A ) -> Dict:
"""simple docstring"""
if "patch_embed.proj" in name:
lowercase__ = name.replace('''patch_embed.proj''' , '''embeddings.patch_embeddings.projection''' )
if "patch_embed.norm" in name:
lowercase__ = name.replace('''patch_embed.norm''' , '''embeddings.norm''' )
if "layers" in name:
lowercase__ = '''encoder.''' + name
if "encoder.layers" in name:
lowercase__ = name.replace('''encoder.layers''' , '''encoder.stages''' )
if "downsample.proj" in name:
lowercase__ = name.replace('''downsample.proj''' , '''downsample.projection''' )
if "blocks" in name:
lowercase__ = name.replace('''blocks''' , '''layers''' )
if "modulation.f.weight" in name or "modulation.f.bias" in name:
lowercase__ = name.replace('''modulation.f''' , '''modulation.projection_in''' )
if "modulation.h.weight" in name or "modulation.h.bias" in name:
lowercase__ = name.replace('''modulation.h''' , '''modulation.projection_context''' )
if "modulation.proj.weight" in name or "modulation.proj.bias" in name:
lowercase__ = name.replace('''modulation.proj''' , '''modulation.projection_out''' )
if name == "norm.weight":
lowercase__ = '''layernorm.weight'''
if name == "norm.bias":
lowercase__ = '''layernorm.bias'''
if "head" in name:
lowercase__ = name.replace('''head''' , '''classifier''' )
else:
lowercase__ = '''focalnet.''' + name
return name
def _SCREAMING_SNAKE_CASE (A , A , A=False ) -> Any:
"""simple docstring"""
lowercase__ = {
'''focalnet-tiny''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_srf.pth''',
'''focalnet-tiny-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_tiny_lrf.pth''',
'''focalnet-small''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_srf.pth''',
'''focalnet-small-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_small_lrf.pth''',
'''focalnet-base''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_srf.pth''',
'''focalnet-base-lrf''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_base_lrf.pth''',
'''focalnet-large-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384.pth''',
'''focalnet-large-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_large_lrf_384_fl4.pth''',
'''focalnet-xlarge-lrf-fl3''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384.pth''',
'''focalnet-xlarge-lrf-fl4''': '''https://projects4jw.blob.core.windows.net/focalnet/release/classification/focalnet_xlarge_lrf_384_fl4.pth''',
}
# fmt: on
lowercase__ = model_name_to_url[model_name]
print('''Checkpoint URL: ''' , A )
lowercase__ = torch.hub.load_state_dict_from_url(A , map_location='''cpu''' )['''model''']
# rename keys
for key in state_dict.copy().keys():
lowercase__ = state_dict.pop(A )
lowercase__ = val
lowercase__ = get_focalnet_config(A )
lowercase__ = FocalNetForImageClassification(A )
model.eval()
# load state dict
model.load_state_dict(A )
# verify conversion
lowercase__ = '''http://images.cocodataset.org/val2017/000000039769.jpg'''
lowercase__ = BitImageProcessor(
do_resize=A , size={'''shortest_edge''': 256} , resample=PILImageResampling.BILINEAR , do_center_crop=A , crop_size=224 , do_normalize=A , image_mean=A , image_std=A , )
lowercase__ = Image.open(requests.get(A , stream=A ).raw )
lowercase__ = processor(images=A , return_tensors='''pt''' )
lowercase__ = transforms.Compose(
[
transforms.Resize(256 ),
transforms.CenterCrop(224 ),
transforms.ToTensor(),
transforms.Normalize(mean=[0.485, 0.456, 0.406] , std=[0.229, 0.224, 0.225] ),
] )
lowercase__ = image_transforms(A ).unsqueeze(0 )
# verify pixel_values
assert torch.allclose(inputs.pixel_values , A , atol=1E-4 )
lowercase__ = model(**A )
lowercase__ = outputs.logits.argmax(-1 ).item()
print('''Predicted class:''' , model.config.idalabel[predicted_class_idx] )
print('''First values of logits:''' , outputs.logits[0, :3] )
if model_name == "focalnet-tiny":
lowercase__ = torch.tensor([0.2_166, -0.4_368, 0.2_191] )
elif model_name == "focalnet-tiny-lrf":
lowercase__ = torch.tensor([1.1_669, 0.0_125, -0.1_695] )
elif model_name == "focalnet-small":
lowercase__ = torch.tensor([0.4_917, -0.0_430, 0.1_341] )
elif model_name == "focalnet-small-lrf":
lowercase__ = torch.tensor([-0.2_588, -0.5_342, -0.2_331] )
elif model_name == "focalnet-base":
lowercase__ = torch.tensor([-0.1_655, -0.4_090, -0.1_730] )
elif model_name == "focalnet-base-lrf":
lowercase__ = torch.tensor([0.5_306, -0.0_483, -0.3_928] )
assert torch.allclose(outputs.logits[0, :3] , A , atol=1E-4 )
print('''Looks ok!''' )
if pytorch_dump_folder_path is not None:
print(f"Saving model and processor of {model_name} to {pytorch_dump_folder_path}" )
model.save_pretrained(A )
processor.save_pretrained(A )
if push_to_hub:
print(f"Pushing model and processor of {model_name} to the hub..." )
model.push_to_hub(f"{model_name}" )
processor.push_to_hub(f"{model_name}" )
if __name__ == "__main__":
lowerCamelCase : str = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='focalnet-tiny',
type=str,
help='Name of the FocalNet model you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub.',
)
lowerCamelCase : Union[str, Any] = parser.parse_args()
convert_focalnet_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub)
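# Example invocation (script name and output path are placeholders):
#
#     python convert_focalnet_to_hf_format.py \
#         --model_name focalnet-tiny \
#         --pytorch_dump_folder_path ./focalnet-tiny \
#         --push_to_hub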
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Dict = (DDPMScheduler,)
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
config = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}' , ):
scheduler.set_timesteps(timesteps=_A )
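# A minimal sketch of the scheduler loop exercised above (standard diffusers
# API; `model` is a hypothetical denoiser returning a noise prediction):
#
#     from diffusers import DDPMScheduler
#     sched = DDPMScheduler(num_train_timesteps=1000, beta_schedule="linear")
#     sched.set_timesteps(50)  # or sched.set_timesteps(timesteps=[100, 87, 50, 1, 0])
#     for t in sched.timesteps:
#         noise_pred = model(sample, t)
#         sample = sched.step(noise_pred, t, sample).prev_sample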
'''simple docstring'''
import math
from typing import Callable, List, Optional, Union
import numpy as np
import PIL
import torch
from PIL import Image
from transformers import CLIPTextModel, CLIPTokenizer
from diffusers.models import AutoencoderKL, UNetaDConditionModel
from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_upscale import StableDiffusionUpscalePipeline
from diffusers.schedulers import DDIMScheduler, DDPMScheduler, LMSDiscreteScheduler, PNDMScheduler
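# Tiled x4 upscaling: the input image is cut into overlapping tiles, each tile is
# upscaled independently by the StableDiffusionUpscalePipeline superclass, and the
# results are pasted back with linear-ramp transparency masks so the seams blend.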
def make_transparency_mask( size , overlap_pixels , remove_borders=[] ):
    '''simple docstring'''
    size_x = size[0] - overlap_pixels * 2
    size_y = size[1] - overlap_pixels * 2
    for letter in ["l", "r"]:
        if letter in remove_borders:
            size_x += overlap_pixels
    for letter in ["t", "b"]:
        if letter in remove_borders:
            size_y += overlap_pixels
    mask = np.ones((size_y, size_x) , dtype=np.uint8 ) * 255
    mask = np.pad(mask , mode='''linear_ramp''' , pad_width=overlap_pixels , end_values=0 )
    if "l" in remove_borders:
        mask = mask[:, overlap_pixels : mask.shape[1]]
    if "r" in remove_borders:
        mask = mask[:, 0 : mask.shape[1] - overlap_pixels]
    if "t" in remove_borders:
        mask = mask[overlap_pixels : mask.shape[0], :]
    if "b" in remove_borders:
        mask = mask[0 : mask.shape[0] - overlap_pixels, :]
    return mask
def clamp( n , smallest , largest ):
    '''simple docstring'''
    return max(smallest , min(n , largest ) )
def clamp_rect( rect , min , max ):
    '''simple docstring'''
    return (
        clamp(rect[0] , min[0] , max[0] ),
        clamp(rect[1] , min[1] , max[1] ),
        clamp(rect[2] , min[0] , max[0] ),
        clamp(rect[3] , min[1] , max[1] ),
    )
def add_overlap_rect( rect , overlap , image_size ):
    '''simple docstring'''
    rect = list(rect )
    rect[0] -= overlap
    rect[1] -= overlap
    rect[2] += overlap
    rect[3] += overlap
    rect = clamp_rect(rect , [0, 0] , [image_size[0], image_size[1]] )
    return rect
def squeeze_tile( tile , original_image , original_slice , slice_x ):
    '''simple docstring'''
    result = Image.new('''RGB''' , (tile.size[0] + original_slice, tile.size[1]) )
    result.paste(
        original_image.resize((tile.size[0], tile.size[1]) , Image.BICUBIC ).crop(
            (slice_x, 0, slice_x + original_slice, tile.size[1]) ) , (0, 0) , )
    result.paste(tile , (original_slice, 0) )
    return result
def unsqueeze_tile( tile , original_image_slice ):
    '''simple docstring'''
    crop_rect = (original_image_slice * 4, 0, tile.size[0], tile.size[1])
    tile = tile.crop(crop_rect )
    return tile
def lowerCAmelCase_ ( n , d ):
    '''simple docstring'''
    divisor = n % d
    return n - divisor
class StableDiffusionTiledUpscalePipeline( StableDiffusionUpscalePipeline ):
    def __init__( self , vae , text_encoder , tokenizer , unet , low_res_scheduler , scheduler , max_noise_level = 350 , ) -> List[Any]:
        """simple docstring"""
        super().__init__(
            vae=vae , text_encoder=text_encoder , tokenizer=tokenizer , unet=unet , low_res_scheduler=low_res_scheduler , scheduler=scheduler , max_noise_level=max_noise_level , )
    def _process_tile( self , original_image_slice , x , y , tile_size , tile_border , image , final_image , **kwargs ) -> Optional[int]:
        """simple docstring"""
        torch.manual_seed(0 )
        crop_rect = (
            min(image.size[0] - (tile_size + original_image_slice) , x * tile_size ),
            min(image.size[1] - (tile_size + original_image_slice) , y * tile_size ),
            min(image.size[0] , (x + 1) * tile_size ),
            min(image.size[1] , (y + 1) * tile_size ),
        )
        crop_rect_with_overlap = add_overlap_rect(crop_rect , tile_border , image.size )
        tile = image.crop(crop_rect_with_overlap )
        translated_slice_x = ((crop_rect[0] + ((crop_rect[2] - crop_rect[0]) / 2)) / image.size[0]) * tile.size[0]
        translated_slice_x = translated_slice_x - (original_image_slice / 2)
        translated_slice_x = max(0 , translated_slice_x )
        to_input = squeeze_tile(tile , image , original_image_slice , translated_slice_x )
        orig_input_size = to_input.size
        to_input = to_input.resize((tile_size, tile_size) , Image.BICUBIC )
        upscaled_tile = super(StableDiffusionTiledUpscalePipeline , self ).__call__(image=to_input , **kwargs ).images[0]
        upscaled_tile = upscaled_tile.resize((orig_input_size[0] * 4, orig_input_size[1] * 4) , Image.BICUBIC )
        upscaled_tile = unsqueeze_tile(upscaled_tile , original_image_slice )
        upscaled_tile = upscaled_tile.resize((tile.size[0] * 4, tile.size[1] * 4) , Image.BICUBIC )
        remove_borders = []
        if x == 0:
            remove_borders.append('''l''' )
        elif crop_rect[2] == image.size[0]:
            remove_borders.append('''r''' )
        if y == 0:
            remove_borders.append('''t''' )
        elif crop_rect[3] == image.size[1]:
            remove_borders.append('''b''' )
        transparency_mask = Image.fromarray(
            make_transparency_mask(
                (upscaled_tile.size[0], upscaled_tile.size[1]) , tile_border * 4 , remove_borders=remove_borders ) , mode='''L''' , )
        final_image.paste(
            upscaled_tile , (crop_rect_with_overlap[0] * 4, crop_rect_with_overlap[1] * 4) , transparency_mask )
    @torch.no_grad()
    def __call__( self , prompt , image , num_inference_steps = 75 , guidance_scale = 9.0 , noise_level = 50 , negative_prompt = None , num_images_per_prompt = 1 , eta = 0.0 , generator = None , latents = None , callback = None , callback_steps = 1 , tile_size = 128 , tile_border = 32 , original_image_slice = 32 , ) -> Dict:
        """simple docstring"""
        final_image = Image.new('''RGB''' , (image.size[0] * 4, image.size[1] * 4) )
        tcx = math.ceil(image.size[0] / tile_size )
        tcy = math.ceil(image.size[1] / tile_size )
        total_tile_count = tcx * tcy
        current_count = 0
        for y in range(tcy ):
            for x in range(tcx ):
                self._process_tile(
                    original_image_slice , x , y , tile_size , tile_border , image , final_image , prompt=prompt , num_inference_steps=num_inference_steps , guidance_scale=guidance_scale , noise_level=noise_level , negative_prompt=negative_prompt , num_images_per_prompt=num_images_per_prompt , eta=eta , generator=generator , latents=latents , )
                current_count += 1
                if callback is not None:
                    callback({'''progress''': current_count / total_tile_count, '''image''': final_image} )
        return final_image
def main():
    '''simple docstring'''
    model_id = '''stabilityai/stable-diffusion-x4-upscaler'''
    pipe = StableDiffusionTiledUpscalePipeline.from_pretrained(model_id , revision='''fp16''' , torch_dtype=torch.float16 )
    pipe = pipe.to('''cuda''' )
    image = Image.open('''../../docs/source/imgs/diffusers_library.jpg''' )
    def callback(obj ):
        print(F'progress: {obj["progress"]:.4f}' )
        obj["image"].save('''diffusers_library_progress.jpg''' )
    final_image = pipe(image=image , prompt='''Black font, white background, vector''' , noise_level=40 , callback=callback )
    final_image.save('''diffusers_library.jpg''' )
if __name__ == "__main__":
main()
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
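# Fast tests for IFInpaintingPipeline: the shared save/load, fp16, attention
# slicing and xformers checks come from PipelineTesterMixin and
# IFPipelineTesterMixin; inputs are tiny random tensors so the suite stays fast.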
@skip_mps
class _lowerCamelCase ( lowercase__ , lowercase__ , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = IFInpaintingPipeline
A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
return self._get_dummy_components()
def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]:
if str(_A ).startswith('mps' ):
__magic_name__ : Optional[Any] = torch.manual_seed(_A )
else:
__magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __lowerCAmelCase ( self : Dict ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self : Tuple ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
self._test_save_load_local()
def __lowerCAmelCase ( self : Any ) -> int:
self._test_inference_batch_single_identical(
expected_max_diff=1E-2 , )
'''simple docstring'''
import argparse
import torch
from ...utils import logging
from . import AlbertConfig, AlbertForPreTraining, load_tf_weights_in_albert
logging.set_verbosity_info()
def a_ ( lowerCamelCase : Tuple , lowerCamelCase : Dict , lowerCamelCase : Tuple ):
# Initialise PyTorch model
lowerCAmelCase = AlbertConfig.from_json_file(lowerCamelCase )
print(f'''Building PyTorch model from configuration: {config}''' )
lowerCAmelCase = AlbertForPreTraining(lowerCamelCase )
# Load weights from tf checkpoint
load_tf_weights_in_albert(lowerCamelCase , lowerCamelCase , lowerCamelCase )
# Save pytorch-model
print(f'''Save PyTorch model to {pytorch_dump_path}''' )
torch.save(model.state_dict() , lowerCamelCase )
if __name__ == "__main__":
__snake_case =argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"""--tf_checkpoint_path""", default=None, type=str, required=True, help="""Path to the TensorFlow checkpoint path."""
)
parser.add_argument(
"""--albert_config_file""",
default=None,
type=str,
required=True,
help=(
"""The config json file corresponding to the pre-trained ALBERT model. \n"""
"""This specifies the model architecture."""
),
)
parser.add_argument(
"""--pytorch_dump_path""", default=None, type=str, required=True, help="""Path to the output PyTorch model."""
)
__snake_case =parser.parse_args()
convert_tf_checkpoint_to_pytorch(args.tf_checkpoint_path, args.albert_config_file, args.pytorch_dump_path)
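# Example invocation (paths are placeholders):
#
#     python convert_albert_original_tf_checkpoint_to_pytorch.py \
#         --tf_checkpoint_path ./albert_base/model.ckpt-best \
#         --albert_config_file ./albert_base/albert_config.json \
#         --pytorch_dump_path ./albert_base/pytorch_model.bin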
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
    def __init__( self , parent , batch_size=13 , seq_length=7 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=32 , num_hidden_layers=5 , num_attention_heads=4 , intermediate_size=37 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , relative_attention=False , position_biased_input=True , pos_att_type="None" , num_labels=3 , num_choices=4 , scope=None , ) -> Dict:
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.relative_attention = relative_attention
        self.position_biased_input = position_biased_input
        self.pos_att_type = pos_att_type
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config(self):
        return DebertaConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )

    def get_pipeline_config(self):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def check_loss_output(self, result):
        self.parent.assertListEqual(list(result.loss.size() ) , [] )
    def create_and_check_deberta_model(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaModel(config=config)
        model.to(torch_device)
        model.eval()
        sequence_output = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids , token_type_ids=token_type_ids )[0]
        sequence_output = model(input_ids )[0]
        self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
    def create_and_check_deberta_for_masked_lm(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForMaskedLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_deberta_for_sequence_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForSequenceClassification(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
        self.check_loss_output(result)
    def create_and_check_deberta_for_token_classification(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        config.num_labels = self.num_labels
        model = DebertaForTokenClassification(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_deberta_for_question_answering(self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels):
        model = DebertaForQuestionAnswering(config=config)
        model.to(torch_device)
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
    fx_compatible = True
    test_torchscript = False
    test_pruning = False
    test_head_masking = False
    is_encoder_decoder = False
    def setUp(self):
        self.model_tester = DebertaModelTester(self )
        self.config_tester = ConfigTester(self , config_class=DebertaConfig , hidden_size=37 )

    def test_config(self):
        self.config_tester.run_common_tests()

    def test_deberta_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_model(*config_and_inputs )

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_sequence_classification(*config_and_inputs )

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_masked_lm(*config_and_inputs )

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_question_answering(*config_and_inputs )

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_deberta_for_token_classification(*config_and_inputs )

    @slow
    def test_model_from_pretrained(self):
        for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = DebertaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
@require_torch
@require_sentencepiece
@require_tokenizers
class DebertaModelIntegrationTest(unittest.TestCase):
'''simple docstring'''
    @unittest.skip(reason='Model not available yet' )
    def test_inference_masked_lm(self):
        pass

    @slow
    def test_inference_no_head(self):
        model = DebertaModel.from_pretrained('microsoft/deberta-base' )
        input_ids = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
        attention_mask = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
        with torch.no_grad():
            output = model(input_ids , attention_mask=attention_mask )[0]
        # compare the actual values for a slice.
        expected_slice = torch.tensor(
            [[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , expected_slice , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' ) | 331 | 0 |
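# A minimal sketch of driving the model tester above by hand (assumes the
# DeBERTa test file's imports are available; the dummy TestCase host exists
# only so the tester's assertions have a parent to report through):
if __name__ == "__main__":
    import unittest

    class _Host(unittest.TestCase):
        def runTest(self):
            pass

    tester = DebertaModelTester(_Host())
    config_and_inputs = tester.prepare_config_and_inputs()
    tester.create_and_check_deberta_model(*config_and_inputs)
    print('forward-pass shape checks passed')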
from __future__ import annotations
DIRECTIONS = [
[-1, 0], # left
[0, -1], # down
[1, 0], # right
[0, 1], # up
]
def search(
    grid: list[list[int]],
    init: list[int],
    goal: list[int],
    cost: int,
    heuristic: list[list[int]],
) -> tuple[list[list[int]], list[list[int]]]:
    """Run A* over a unit-cost grid and return the (path, action) grids."""
    closed = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the reference grid
    closed[init[0]][init[1]] = 1
    action = [
        [0 for col in range(len(grid[0]))] for row in range(len(grid))
    ]  # the action grid

    x = init[0]
    y = init[1]
    g = 0
    f = g + heuristic[x][y]  # cost from starting cell to destination cell
    cell = [[f, g, x, y]]

    found = False  # flag that is set when search is complete
    resign = False  # flag set if we can't find expand

    while not found and not resign:
        if len(cell) == 0:
            raise ValueError('Algorithm is unable to find solution')
        else:  # to choose the least costliest action so as to move closer to the goal
            cell.sort()
            cell.reverse()
            next_cell = cell.pop()
            x = next_cell[2]
            y = next_cell[3]
            g = next_cell[1]

            if x == goal[0] and y == goal[1]:
                found = True
            else:
                for i in range(len(DIRECTIONS)):  # to try out different valid actions
                    xa = x + DIRECTIONS[i][0]
                    ya = y + DIRECTIONS[i][1]
                    if xa >= 0 and xa < len(grid) and ya >= 0 and ya < len(grid[0]):
                        if closed[xa][ya] == 0 and grid[xa][ya] == 0:
                            ga = g + cost
                            fa = ga + heuristic[xa][ya]
                            cell.append([fa, ga, xa, ya])
                            closed[xa][ya] = 1
                            action[xa][ya] = i
    invpath = []
    x = goal[0]
    y = goal[1]
    invpath.append([x, y])  # we get the reverse path from here
    while x != init[0] or y != init[1]:
        xa = x - DIRECTIONS[action[x][y]][0]
        ya = y - DIRECTIONS[action[x][y]][1]
        x = xa
        y = ya
        invpath.append([x, y])

    path = []
    for i in range(len(invpath)):
        path.append(invpath[len(invpath) - 1 - i])
    return path, action
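# An aside (not part of the original algorithm): search() re-sorts the whole
# open list on every iteration via sort()/reverse()/pop(). A common refinement
# is a binary heap, which yields the lowest-f candidate in O(log n). A minimal
# sketch, assuming cells stay `[f, g, x, y]` lists (compared lexicographically):
import heapq


def pop_lowest_f(open_list: list[list[int]]) -> list[int]:
    # heappop returns the smallest element, i.e. the cell with the lowest f.
    return heapq.heappop(open_list)


# Usage: build with heapq.heappush(open_list, [f, g, x, y]) instead of append().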
if __name__ == "__main__":
    grid = [
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 0, 0],  # 0 are free path whereas 1's are obstacles
        [0, 1, 0, 0, 0, 0],
        [0, 1, 0, 0, 1, 0],
        [0, 0, 0, 0, 1, 0],
    ]
    init = [0, 0]
    # all coordinates are given in format [y,x]
    goal = [len(grid) - 1, len(grid[0]) - 1]
    cost = 1

    # the cost map which pushes the path closer to the goal
    heuristic = [[0 for row in range(len(grid[0]))] for col in range(len(grid))]
    for i in range(len(grid)):
        for j in range(len(grid[0])):
            heuristic[i][j] = abs(i - goal[0]) + abs(j - goal[1])
            if grid[i][j] == 1:
                # added extra penalty in the heuristic map
                heuristic[i][j] = 99

    path, action = search(grid, init, goal, cost, heuristic)
    print('''ACTION MAP''')
    for i in range(len(action)):
        print(action[i])
    for i in range(len(path)):
        print(path[i])
| 5 |
'''simple docstring'''

class Matrix:  # Public class to implement a graph
    def __init__(self, row: int, col: int, graph: list[list[bool]]) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph

    def is_safe(self, i: int, j: int, visited: list[list[bool]]) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )

    def diffs(self, i: int, j: int, visited: list[list[bool]]) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8):
            if self.is_safe(i + row_nbr[k], j + col_nbr[k], visited):
                self.diffs(i + row_nbr[k], j + col_nbr[k], visited)

    def count_islands(self) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL)] for i in range(self.ROW)]
        count = 0
        for i in range(self.ROW):
            for j in range(self.COL):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i, j, visited)
                    count += 1
        return count | 331 | 0 |
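# A minimal usage sketch for the class above (the grid is illustrative); with
# 8-way connectivity this particular grid should contain 5 islands:
if __name__ == "__main__":
    graph = [
        [1, 1, 0, 0, 0],
        [0, 1, 0, 0, 1],
        [1, 0, 0, 1, 1],
        [0, 0, 0, 0, 0],
        [1, 0, 1, 0, 1],
    ]
    matrix = Matrix(len(graph), len(graph[0]), graph)
    print(matrix.count_islands())  # -> 5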
from sklearn.metrics import recall_score
import datasets
_DESCRIPTION = '\nRecall is the fraction of the positive examples that were correctly labeled by the model as positive. It can be computed with the equation:\nRecall = TP / (TP + FN)\nWhere TP is the true positives and FN is the false negatives.\n'
_KWARGS_DESCRIPTION = '\nArgs:\n- **predictions** (`list` of `int`): The predicted labels.\n- **references** (`list` of `int`): The ground truth labels.\n- **labels** (`list` of `int`): The set of labels to include when `average` is not set to `binary`, and their order when average is `None`. Labels present in the data can be excluded in this input, for example to calculate a multiclass average ignoring a majority negative class, while labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in y_true and y_pred are used in sorted order. Defaults to None.\n- **pos_label** (`int`): The class label to use as the \'positive class\' when calculating the recall. Defaults to `1`.\n- **average** (`string`): This parameter is required for multiclass/multilabel targets. If None, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.\n - `\'binary\'`: Only report results for the class specified by `pos_label`. This is applicable only if the target labels and predictions are binary.\n - `\'micro\'`: Calculate metrics globally by counting the total true positives, false negatives, and false positives.\n - `\'macro\'`: Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.\n - `\'weighted\'`: Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. Note that it can result in an F-score that is not between precision and recall.\n - `\'samples\'`: Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).\n- **sample_weight** (`list` of `float`): Sample weights Defaults to `None`.\n- **zero_division** (): Sets the value to return when there is a zero division. Defaults to .\n - `\'warn\'`: If there is a zero division, the return value is `0`, but warnings are also raised.\n - `0`: If there is a zero division, the return value is `0`.\n - `1`: If there is a zero division, the return value is `1`.\n\nReturns:\n- **recall** (`float`, or `array` of `float`): Either the general recall score, or the recall scores for individual classes, depending on the values input to `labels` and `average`. Minimum possible value is 0. Maximum possible value is 1. A higher recall means that more of the positive examples have been labeled correctly. ' \
    'Therefore, a higher recall is generally considered better.\n\nExamples:\n\n Example 1-A simple example with some errors\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1])\n >>> print(results)\n {\'recall\': 0.6666666666666666}\n\n Example 2-The same example as Example 1, but with `pos_label=0` instead of the default `pos_label=1`.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], pos_label=0)\n >>> print(results)\n {\'recall\': 0.5}\n\n Example 3-The same example as Example 1, but with `sample_weight` included.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> sample_weight = [0.9, 0.2, 0.9, 0.3, 0.8]\n >>> results = recall_metric.compute(references=[0, 0, 1, 1, 1], predictions=[0, 1, 0, 1, 1], sample_weight=sample_weight)\n >>> print(results)\n {\'recall\': 0.55}\n\n Example 4-A multiclass example, using different averages.\n >>> recall_metric = datasets.load_metric(\'recall\')\n >>> predictions = [0, 2, 1, 0, 0, 1]\n >>> references = [0, 1, 2, 0, 1, 2]\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'macro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'micro\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=\'weighted\')\n >>> print(results)\n {\'recall\': 0.3333333333333333}\n >>> results = recall_metric.compute(predictions=predictions, references=references, average=None)\n >>> print(results)\n {\'recall\': array([1., 0., 0.])}\n'
_CITATION = '\n@article{scikit-learn, title={Scikit-learn: Machine Learning in {P}ython}, author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V. and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P. and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.}, journal={Journal of Machine Learning Research}, volume={12}, pages={2825--2830}, year={2011}\n'
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class Recall(datasets.Metric):
    def _info(self):
'''simple docstring'''
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.recall_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None, zero_division="warn"):
        '''simple docstring'''
        score = recall_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight, zero_division=zero_division)
        return {"recall": float(score) if score.size == 1 else score} | 6 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 0 |
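# A rough sketch (illustrative only, not the real `_LazyModule`) of the
# deferred-import idea used above, built on the module-level __getattr__ hook
# from PEP 562: an attribute resolves to its submodule only on first access.
import importlib

_lazy_structure = {'processing_layoutxlm': ['LayoutXLMProcessor']}


def __getattr__(name):
    for module_name, symbols in _lazy_structure.items():
        if name in symbols:
            module = importlib.import_module(f'.{module_name}', __name__)
            return getattr(module, name)
    raise AttributeError(f'module {__name__!r} has no attribute {name!r}')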
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key(name):
    '''simple docstring'''
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace('img_encoder.pos_embed' , 'vision_model.embeddings.position_embeddings' )
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace('img_encoder.patch_embed.proj' , 'vision_model.embeddings.patch_embeddings.projection' )
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace('img_encoder.patch_embed.norm' , 'vision_model.embeddings.layernorm' )
    if "img_encoder.layers" in name:
        name = name.replace('img_encoder.layers' , 'vision_model.encoder.stages' )
    if "blocks" in name and "res" not in name:
        name = name.replace('blocks' , 'layers' )
    if "attn" in name and "pre_assign" not in name:
        name = name.replace('attn' , 'self_attn' )
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace('proj' , 'out_proj' )
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace('pre_assign_attn.attn.proj' , 'pre_assign_attn.attn.out_proj' )
    if "norm1" in name:
        name = name.replace('norm1' , 'layer_norm1' )
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace('norm2' , 'layer_norm2' )
    if "img_encoder.norm" in name:
        name = name.replace('img_encoder.norm' , 'vision_model.layernorm' )
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace('text_encoder.token_embedding' , 'text_model.embeddings.token_embedding' )
    if "text_encoder.positional_embedding" in name:
        name = name.replace('text_encoder.positional_embedding' , 'text_model.embeddings.position_embedding.weight' )
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace('text_encoder.transformer.resblocks.' , 'text_model.encoder.layers.' )
    if "ln_1" in name:
        name = name.replace('ln_1' , 'layer_norm1' )
    if "ln_2" in name:
        name = name.replace('ln_2' , 'layer_norm2' )
    if "c_fc" in name:
        name = name.replace('c_fc' , 'fc1' )
    if "c_proj" in name:
        name = name.replace('c_proj' , 'fc2' )
    if "text_encoder" in name:
        name = name.replace('text_encoder' , 'text_model' )
    if "ln_final" in name:
        name = name.replace('ln_final' , 'final_layer_norm' )
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace('img_projector.linear_hidden.' , 'visual_projection.' )
    if "img_projector.linear_out." in name:
        name = name.replace('img_projector.linear_out.' , 'visual_projection.3.' )
    if "text_projector.linear_hidden" in name:
        name = name.replace('text_projector.linear_hidden' , 'text_projection' )
    if "text_projector.linear_out" in name:
        name = name.replace('text_projector.linear_out' , 'text_projection.3' )
    return name
def convert_state_dict(orig_state_dict, config):
    '''simple docstring'''
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)

        if "qkv" in key:
            # weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            stage_num, layer_num = int(key_split[2]), int(key_split[4])
            dim = config.vision_config.hidden_size
            prefix = f'vision_model.encoder.stages.{stage_num}.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[dim : dim * 2, :]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        elif "in_proj" in key:
            # weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
            # we need to split them up into separate matrices/vectors
            key_split = key.split('.')
            layer_num = int(key_split[3])
            dim = config.text_config.hidden_size
            prefix = f'text_model.encoder.layers.{layer_num}.self_attn'
            if "weight" in key:
                orig_state_dict[f'{prefix}.q_proj.weight'] = val[:dim, :]
                orig_state_dict[f'{prefix}.k_proj.weight'] = val[
                    dim : dim * 2, :
                ]
                orig_state_dict[f'{prefix}.v_proj.weight'] = val[-dim:, :]
            else:
                orig_state_dict[f'{prefix}.q_proj.bias'] = val[:dim]
                orig_state_dict[f'{prefix}.k_proj.bias'] = val[dim : dim * 2]
                orig_state_dict[f'{prefix}.v_proj.bias'] = val[-dim:]
        else:
            new_name = rename_key(key)
            # squeeze if necessary
            if (
                "text_projection.0" in new_name
                or "text_projection.3" in new_name
                or "visual_projection.0" in new_name
                or "visual_projection.3" in new_name
            ):
                orig_state_dict[new_name] = val.squeeze_()
            else:
                orig_state_dict[new_name] = val

    return orig_state_dict
def prepare_img():
    '''simple docstring'''
    url = 'http://images.cocodataset.org/val2017/000000039769.jpg'
    im = Image.open(requests.get(url , stream=True ).raw )
    return im
@torch.no_grad()
def convert_groupvit_checkpoint(checkpoint_path, pytorch_dump_folder_path, model_name="groupvit-gcc-yfcc", push_to_hub=False):
    '''simple docstring'''
    config = GroupViTConfig()
    model = GroupViTModel(config ).eval()

    state_dict = torch.load(checkpoint_path , map_location='cpu' )['model']
    new_state_dict = convert_state_dict(state_dict , config )
    missing_keys, unexpected_keys = model.load_state_dict(new_state_dict , strict=False )
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys ) == 0)

    # verify result
    processor = CLIPProcessor.from_pretrained('openai/clip-vit-base-patch32' )
    image = prepare_img()
    inputs = processor(text=['a photo of a cat', 'a photo of a dog'] , images=image , padding=True , return_tensors='pt' )

    with torch.no_grad():
        outputs = model(**inputs )

    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]] )
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]] )
    else:
        raise ValueError(f'Model name {model_name} not supported.' )
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3 )

    processor.save_pretrained(pytorch_dump_folder_path )
    model.save_pretrained(pytorch_dump_folder_path )
    print('Successfully saved processor and model to' , pytorch_dump_folder_path )

    if push_to_hub:
        print('Pushing to the hub...' )
        processor.push_to_hub(model_name , organization='nielsr' )
        model.push_to_hub(model_name , organization='nielsr' )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
"--pytorch_dump_folder_path", default=None, type=str, help="Path to dump the processor and PyTorch model."
)
parser.add_argument("--checkpoint_path", default=None, type=str, help="Path to GroupViT checkpoint")
parser.add_argument(
"--model_name",
default="groupvit-gccy-fcc",
type=str,
help="Name of the model. Expecting either 'groupvit-gcc-yfcc' or 'groupvit-gcc-redcaps'",
)
parser.add_argument(
"--push_to_hub",
action="store_true",
help="Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.",
)
    args = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
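    # A hypothetical invocation (script name and paths are placeholders):
    #   python convert_groupvit_nvlab_to_hf.py \
    #       --checkpoint_path ./group_vit_gcc_yfcc_30e.pth \
    #       --model_name groupvit-gcc-yfcc \
    #       --pytorch_dump_folder_path ./groupvit-gcc-yfcc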
| 7 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution(target: int = 200_0000) -> int:
    """simple docstring"""
    triangle_numbers: list[int] = [0]
    idx: int

    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )

    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int

    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]

        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil

    return area
if __name__ == "__main__":
print(F'{solution() = }') | 331 | 0 |
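# Background for the solution above: an a-by-b grid contains T(a) * T(b)
# sub-rectangles, where T(n) = n * (n + 1) / 2 is the n-th triangle number.
# A tiny brute-force check of that identity (the 3x2 case is the classic
# 18-rectangle example):
def count_rectangles(a: int, b: int) -> int:
    return sum(
        (a - w + 1) * (b - h + 1)  # placements of a w-by-h rectangle
        for w in range(1, a + 1)
        for h in range(1, b + 1)
    )


assert count_rectangles(3, 2) == (3 * 4 // 2) * (2 * 3 // 2) == 18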
from sklearn.metrics import f1_score
import datasets
_DESCRIPTION = '''
The F1 score is the harmonic mean of the precision and recall. It can be computed with the equation:
F1 = 2 * (precision * recall) / (precision + recall)
'''
_KWARGS_DESCRIPTION = '''
Args:
predictions (`list` of `int`): Predicted labels.
references (`list` of `int`): Ground truth labels.
labels (`list` of `int`): The set of labels to include when `average` is not set to `\'binary\'`, and the order of the labels if `average` is `None`. Labels present in the data can be excluded, for example to calculate a multiclass average ignoring a majority negative class. Labels not present in the data will result in 0 components in a macro average. For multilabel targets, labels are column indices. By default, all labels in `predictions` and `references` are used in sorted order. Defaults to None.
pos_label (`int`): The class to be considered the positive class, in the case where `average` is set to `binary`. Defaults to 1.
average (`string`): This parameter is required for multiclass/multilabel targets. If set to `None`, the scores for each class are returned. Otherwise, this determines the type of averaging performed on the data. Defaults to `\'binary\'`.
- \'binary\': Only report results for the class specified by `pos_label`. This is applicable only if the classes found in `predictions` and `references` are binary.
- \'micro\': Calculate metrics globally by counting the total true positives, false negatives and false positives.
- \'macro\': Calculate metrics for each label, and find their unweighted mean. This does not take label imbalance into account.
- \'weighted\': Calculate metrics for each label, and find their average weighted by support (the number of true instances for each label). This alters `\'macro\'` to account for label imbalance. This option can result in an F-score that is not between precision and recall.
- \'samples\': Calculate metrics for each instance, and find their average (only meaningful for multilabel classification).
sample_weight (`list` of `float`): Sample weights Defaults to None.
Returns:
f1 (`float` or `array` of `float`): F1 score or list of f1 scores, depending on the value passed to `average`. Minimum possible value is 0. Maximum possible value is 1. Higher f1 scores are better.
Examples:
Example 1-A simple binary example
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0])
>>> print(results)
{\'f1\': 0.5}
Example 2-The same simple binary example as in Example 1, but with `pos_label` set to `0`.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], pos_label=0)
>>> print(round(results[\'f1\'], 2))
0.67
Example 3-The same simple binary example as in Example 1, but with `sample_weight` included.
>>> f1_metric = datasets.load_metric("f1")
>>> results = f1_metric.compute(references=[0, 1, 0, 1, 0], predictions=[0, 0, 1, 1, 0], sample_weight=[0.9, 0.5, 3.9, 1.2, 0.3])
>>> print(round(results[\'f1\'], 2))
0.35
Example 4-A multiclass example, with different values for the `average` input.
>>> predictions = [0, 2, 1, 0, 0, 1]
>>> references = [0, 1, 2, 0, 1, 2]
>>> results = f1_metric.compute(predictions=predictions, references=references, average="macro")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average="micro")
>>> print(round(results[\'f1\'], 2))
0.33
>>> results = f1_metric.compute(predictions=predictions, references=references, average="weighted")
>>> print(round(results[\'f1\'], 2))
0.27
>>> results = f1_metric.compute(predictions=predictions, references=references, average=None)
>>> print(results)
{\'f1\': array([0.8, 0. , 0. ])}
'''
_CITATION = '''
@article{scikit-learn,
title={Scikit-learn: Machine Learning in {P}ython},
author={Pedregosa, F. and Varoquaux, G. and Gramfort, A. and Michel, V.
and Thirion, B. and Grisel, O. and Blondel, M. and Prettenhofer, P.
and Weiss, R. and Dubourg, V. and Vanderplas, J. and Passos, A. and
Cournapeau, D. and Brucher, M. and Perrot, M. and Duchesnay, E.},
journal={Journal of Machine Learning Research},
volume={12},
pages={2825--2830},
year={2011}
}
'''
@datasets.utils.file_utils.add_start_docstrings(_DESCRIPTION , _KWARGS_DESCRIPTION )
class F1(datasets.Metric):
'''simple docstring'''
    def _info(self):
return datasets.MetricInfo(
description=_DESCRIPTION , citation=_CITATION , inputs_description=_KWARGS_DESCRIPTION , features=datasets.Features(
{
'''predictions''': datasets.Sequence(datasets.Value('''int32''' ) ),
'''references''': datasets.Sequence(datasets.Value('''int32''' ) ),
}
if self.config_name == '''multilabel'''
else {
'''predictions''': datasets.Value('''int32''' ),
'''references''': datasets.Value('''int32''' ),
} ) , reference_urls=['''https://scikit-learn.org/stable/modules/generated/sklearn.metrics.f1_score.html'''] , )
    def _compute(self, predictions, references, labels=None, pos_label=1, average="binary", sample_weight=None):
        score = f1_score(
            references, predictions, labels=labels, pos_label=pos_label, average=average, sample_weight=sample_weight)
        return {"f1": float(score) if score.size == 1 else score} | 8 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 331 | 0 |
from abc import ABC, abstractmethod
from typing import List, Optional
class Constraint(ABC):
    '''simple docstring'''

    def __init__(self):
        # test for the above condition
        self.test()

    def test(self):
        counter = 0
        completed = False
        while not completed:
            if counter == 1:
                self.reset()
            advance = self.advance()
            if not self.does_advance(advance):
                raise Exception(
                    '''Custom Constraint is not defined correctly. self.does_advance(self.advance()) must be true.''' )
            stepped, completed, reset = self.update(advance)
            counter += 1
            if counter > 10_000:
                raise Exception('''update() does not fulfill the constraint.''' )
        if self.remaining() != 0:
            raise Exception('''Custom Constraint is not defined correctly.''' )

    @abstractmethod
    def advance(self):
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def does_advance(self, token_id: int):
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def update(self, token_id: int):
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def reset(self):
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def remaining(self):
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )

    @abstractmethod
    def copy(self, stateful=False):
        raise NotImplementedError(
            f'''{self.__class__} is an abstract class. Only classes inheriting this class can be called.''' )
class PhrasalConstraint(Constraint):
    '''simple docstring'''

    def __init__(self, token_ids: List[int]):
        super(Constraint, self).__init__()
        if not isinstance(token_ids, list) or len(token_ids) == 0:
            raise ValueError(f'''`token_ids` has to be a non-empty list, but is {token_ids}.''' )
        if any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids):
            raise ValueError(f'''Each list in `token_ids` has to be a list of positive integers, but is {token_ids}.''' )

        self.token_ids = token_ids
        self.seqlen = len(self.token_ids)
        self.fulfilled_idx = -1  # the index of the currently fulfilled step
        self.completed = False

    def advance(self):
        if self.completed:
            return None
        return self.token_ids[self.fulfilled_idx + 1]

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''' )
        if self.completed:
            return False
        return token_id == self.token_ids[self.fulfilled_idx + 1]

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` has to be an `int`, but is {token_id} of type {type(token_id)}''' )

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.fulfilled_idx += 1
            stepped = True
            if self.fulfilled_idx == (self.seqlen - 1):
                completed = True
            self.completed = completed
        else:
            # failed to make progress.
            reset = True
            self.reset()
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.fulfilled_idx = 0

    def remaining(self):
        return self.seqlen - (self.fulfilled_idx + 1)

    def copy(self, stateful=False):
        new_constraint = PhrasalConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.fulfilled_idx = self.fulfilled_idx
            new_constraint.completed = self.completed
        return new_constraint
class DisjunctiveTrie:
    '''simple docstring'''

    def __init__(self, nested_token_ids: List[List[int]], no_subsets=True):
        self.max_height = max([len(one) for one in nested_token_ids])

        root = {}
        for token_ids in nested_token_ids:
            level = root
            for tidx, token_id in enumerate(token_ids):
                if token_id not in level:
                    level[token_id] = {}
                level = level[token_id]

        if no_subsets and self.has_subsets(root, nested_token_ids):
            raise ValueError(
                '''Each list in `nested_token_ids` can\'t be a complete subset of another list, but is'''
                f''' {nested_token_ids}.''' )

        self.trie = root

    def next_tokens(self, current_seq):
        start = self.trie
        for current_token in current_seq:
            start = start[current_token]
        next_tokens = list(start.keys())
        return next_tokens

    def reached_leaf(self, current_seq):
        next_tokens = self.next_tokens(current_seq)
        return len(next_tokens) == 0

    def count_leaves(self, root):
        next_nodes = list(root.values())
        if len(next_nodes) == 0:
            return 1
        else:
            return sum([self.count_leaves(nn) for nn in next_nodes])

    def has_subsets(self, trie, nested_token_ids):
        leaf_count = self.count_leaves(trie)
        return len(nested_token_ids) != leaf_count
class DisjunctiveConstraint(Constraint):
    '''simple docstring'''

    def __init__(self, nested_token_ids: List[List[int]]):
        super(Constraint, self).__init__()
        if not isinstance(nested_token_ids, list) or len(nested_token_ids) == 0:
            raise ValueError(f'''`nested_token_ids` has to be a non-empty list, but is {nested_token_ids}.''' )
        if any(not isinstance(token_ids, list) for token_ids in nested_token_ids):
            raise ValueError(f'''`nested_token_ids` has to be a list of lists, but is {nested_token_ids}.''' )
        if any(
            any((not isinstance(token_id, int) or token_id < 0) for token_id in token_ids)
            for token_ids in nested_token_ids):
            raise ValueError(
                f'''Each list in `nested_token_ids` has to be a list of positive integers, but is {nested_token_ids}.''' )

        self.trie = DisjunctiveTrie(nested_token_ids)
        self.token_ids = nested_token_ids
        self.seqlen = self.trie.max_height
        self.current_seq = []
        self.completed = False

    def advance(self):
        token_list = self.trie.next_tokens(self.current_seq)
        if len(token_list) == 0:
            return None
        else:
            return token_list

    def does_advance(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''' )
        next_tokens = self.trie.next_tokens(self.current_seq)
        return token_id in next_tokens

    def update(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` is supposed to be type `int`, but is {token_id} of type {type(token_id)}''' )

        stepped = False
        completed = False
        reset = False

        if self.does_advance(token_id):
            self.current_seq.append(token_id)
            stepped = True
        else:
            reset = True
            self.reset()

        completed = self.trie.reached_leaf(self.current_seq)
        self.completed = completed
        return stepped, completed, reset

    def reset(self):
        self.completed = False
        self.current_seq = []

    def remaining(self):
        if self.completed:
            # since this can be completed without reaching max height
            return 0
        else:
            return self.seqlen - len(self.current_seq)

    def copy(self, stateful=False):
        new_constraint = DisjunctiveConstraint(self.token_ids)
        if stateful:
            new_constraint.seq_len = self.seqlen
            new_constraint.current_seq = self.current_seq
            new_constraint.completed = self.completed
        return new_constraint
class ConstraintListState:
    '''simple docstring'''

    def __init__(self, constraints: List[Constraint]):
        self.constraints = constraints
        # max # of steps required to fulfill a given constraint
        self.max_seqlen = max([c.seqlen for c in constraints])
        self.n_constraints = len(constraints)
        self.completed = False
        self.init_state()

    def init_state(self):
        self.complete_constraints = []
        self.inprogress_constraint = None
        self.pending_constraints = [constraint.copy(stateful=False) for constraint in self.constraints]

    def get_bank(self):
        add = 0
        if self.inprogress_constraint:
            # extra points for having a constraint mid-fulfilled
            add += self.max_seqlen - self.inprogress_constraint.remaining()
        return (len(self.complete_constraints) * self.max_seqlen) + add

    def advance(self):
        token_list = []
        if self.inprogress_constraint is None:
            for constraint in self.pending_constraints:  # "pending" == "unfulfilled yet"
                advance = constraint.advance()
                if isinstance(advance, int):
                    token_list.append(advance)
                elif isinstance(advance, list):
                    token_list.extend(advance)
        else:
            advance = self.inprogress_constraint.advance()
            if isinstance(advance, int):
                token_list.append(advance)
            elif isinstance(advance, list):
                token_list.extend(advance)

        if len(token_list) == 0:
            return None
        else:
            return token_list

    def reset(self, token_ids: Optional[List[int]]):
        self.init_state()
        if token_ids is not None:
            for token in token_ids:
                # completes or steps **one** constraint
                complete, stepped = self.add(token)
                # the entire list of constraints are fulfilled
                if self.completed:
                    break

    def add(self, token_id: int):
        if not isinstance(token_id, int):
            raise ValueError(f'''`token_id` should be an `int`, but is `{token_id}`.''' )

        complete, stepped = False, False

        if self.completed:
            complete = True
            stepped = False
            return complete, stepped

        if self.inprogress_constraint is not None:
            # In the middle of fulfilling a constraint. If the `token_id` *does* makes an incremental progress to current
            # job, simply update the state
            stepped, complete, reset = self.inprogress_constraint.update(token_id)
            if reset:
                # 1. If the next token breaks the progress, then we must restart.
                #     e.g. constraint = "I love pies" and sequence so far is "I love" but `token_id` == "books".
                #     But that doesn't mean we self.init_state(), since we only reset the state for this particular
                #     constraint, not the full list of constraints.
                self.pending_constraints.append(self.inprogress_constraint.copy(stateful=False))
                self.inprogress_constraint = None
            if complete:
                # 2. If the next token completes the constraint, move it to completed list, set
                #     inprogress to None. If there are no pending constraints either, then this full list of constraints
                #     is complete.
                self.complete_constraints.append(self.inprogress_constraint)
                self.inprogress_constraint = None
                if len(self.pending_constraints) == 0:
                    # we're done!
                    self.completed = True
        else:
            # Not in the middle of fulfilling a constraint. So does this `token_id` helps us step towards any of our list
            # of constraints?
            for cidx, pending_constraint in enumerate(self.pending_constraints):
                if pending_constraint.does_advance(token_id):
                    stepped, complete, reset = pending_constraint.update(token_id)
                    if not stepped:
                        raise Exception(
                            '''`constraint.update(token_id)` is not yielding incremental progress, '''
                            '''even though `constraint.does_advance(token_id)` is true.''' )
                    if complete:
                        self.complete_constraints.append(pending_constraint)
                        self.inprogress_constraint = None
                    if not complete and stepped:
                        self.inprogress_constraint = pending_constraint
                    if complete or stepped:
                        # If we made any progress at all, then it's at least not a "pending constraint".
                        self.pending_constraints = (
                            self.pending_constraints[:cidx] + self.pending_constraints[cidx + 1 :]
                        )
                        if len(self.pending_constraints) == 0 and self.inprogress_constraint is None:
                            # If there's no longer any pending after this and no inprogress either, then we must be
                            # complete.
                            self.completed = True
                        break  # prevent accidentally stepping through multiple constraints with just one token.

        return complete, stepped

    def copy(self, stateful=True):
        new_state = ConstraintListState(self.constraints)  # we actually never though self.constraints objects
        # throughout this process. So it's at initialization state.
        if stateful:
            new_state.complete_constraints = [
                constraint.copy(stateful=True) for constraint in self.complete_constraints
            ]
            if self.inprogress_constraint is not None:
                new_state.inprogress_constraint = self.inprogress_constraint.copy(stateful=True)
            new_state.pending_constraints = [constraint.copy() for constraint in self.pending_constraints]
        return new_state
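# A minimal end-to-end sketch of how these classes compose during constrained
# beam search (the token ids below are made up for illustration):
if __name__ == "__main__":
    phrasal = PhrasalConstraint([5, 9, 1])                    # force the phrase 5 -> 9 -> 1
    disjunctive = DisjunctiveConstraint([[2, 3], [2, 4, 6]])  # force one of two phrases

    state = ConstraintListState([phrasal, disjunctive])
    state.reset([5, 9])      # pretend the beam has already generated tokens 5 and 9
    print(state.advance())   # [1]  -- the in-progress phrase takes priority
    print(state.get_bank())  # 2    -- progress score used to sort beams into banks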
| 9 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
logger = logging.get_logger(__name__)
class _lowerCamelCase ( lowercase__ ):
'''simple docstring'''
A_ : Tuple = ["""pixel_values"""]
def __init__( self : Dict , _A : bool = True , _A : Dict[str, int] = None , _A : float = None , _A : PILImageResampling = PILImageResampling.BILINEAR , _A : bool = True , _A : Union[int, float] = 1 / 255 , _A : bool = True , _A : Optional[Union[float, List[float]]] = None , _A : Optional[Union[float, List[float]]] = None , **_A : int , ) -> None:
super().__init__(**_A )
__magic_name__ : List[str] = size if size is not None else {'shortest_edge': 384}
__magic_name__ : Dict = get_size_dict(_A , default_to_square=_A )
__magic_name__ : List[Any] = do_resize
__magic_name__ : str = size
# Default value set here for backwards compatibility where the value in config is None
__magic_name__ : Optional[Any] = crop_pct if crop_pct is not None else 224 / 256
__magic_name__ : int = resample
__magic_name__ : List[str] = do_rescale
__magic_name__ : List[Any] = rescale_factor
__magic_name__ : str = do_normalize
__magic_name__ : List[str] = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
__magic_name__ : int = image_std if image_std is not None else IMAGENET_STANDARD_STD
def __lowerCAmelCase ( self : Optional[Any] , _A : np.ndarray , _A : Dict[str, int] , _A : float , _A : PILImageResampling = PILImageResampling.BICUBIC , _A : Optional[Union[str, ChannelDimension]] = None , **_A : Tuple , ) -> np.ndarray:
__magic_name__ : Optional[int] = get_size_dict(_A , default_to_square=_A )
if "shortest_edge" not in size:
raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
__magic_name__ : Dict = size['shortest_edge']
if shortest_edge < 384:
# maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
__magic_name__ : Dict = int(shortest_edge / crop_pct )
__magic_name__ : str = get_resize_output_image_size(_A , size=_A , default_to_square=_A )
__magic_name__ : Optional[int] = resize(image=_A , size=_A , resample=_A , data_format=_A , **_A )
# then crop to (shortest_edge, shortest_edge)
return center_crop(image=_A , size=(shortest_edge, shortest_edge) , data_format=_A , **_A )
else:
# warping (no cropping) when evaluated at 384 or larger
return resize(
_A , size=(shortest_edge, shortest_edge) , resample=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : int , _A : np.ndarray , _A : Union[int, float] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> int:
return rescale(_A , scale=_A , data_format=_A , **_A )
def __lowerCAmelCase ( self : List[Any] , _A : np.ndarray , _A : Union[float, List[float]] , _A : Union[float, List[float]] , _A : Optional[Union[str, ChannelDimension]] = None , **_A : int , ) -> np.ndarray:
return normalize(_A , mean=_A , std=_A , data_format=_A , **_A )
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        crop_pct: float = None,
        resample: PILImageResampling = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: ChannelDimension = ChannelDimension.FIRST,
        **kwargs,
    ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size, default_to_square=False)

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )

        if do_resize and (size is None or resample is None):
            raise ValueError("Size and resample must be specified if do_resize is True.")

        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError("crop_pct must be specified if size < 384.")

        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")

        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, crop_pct=crop_pct, resample=resample) for image in images]

        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]

        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]

        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
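# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Assumes PIL is installed; the file name below is hypothetical.
#
#     from PIL import Image
#
#     processor = ConvNextImageProcessor(size={"shortest_edge": 224})
#     image = Image.open("example.jpg")
#     batch = processor.preprocess(image, return_tensors="np")
#     # batch["pixel_values"] has shape (1, 3, 224, 224): shortest edge is resized
#     # to 224 / crop_pct = 256, then a 224x224 center crop is taken.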
import dataclasses
import re
import string
from typing import Any, Dict, Iterator, List, Mapping, Optional, Sequence, Tuple
import numpy as np
from . import residue_constants
FeatureDict = Mapping[str, np.ndarray]
ModelOutput = Mapping[str, Any]  # Is a nested dict.
PICO_TO_ANGSTROM = 0.01
@dataclasses.dataclass(frozen=True)
class Protein:
    """Protein structure representation."""

    atom_positions: np.ndarray  # [num_res, num_atom_type, 3]

    # Amino-acid type for each residue represented as an integer between 0 and
    # 20, where 20 is 'X'.
    aatype: np.ndarray  # [num_res]

    # Binary float mask to indicate presence of a particular atom. 1.0 if an atom
    # is present and 0.0 if not. This should be used for loss masking.
    atom_mask: np.ndarray  # [num_res, num_atom_type]

    # Residue index as used in PDB. It is not necessarily continuous or 0-indexed.
    residue_index: np.ndarray  # [num_res]

    # B-factors, or temperature factors, of each residue (in sq. angstroms units),
    # representing the displacement of the residue from its ground truth mean
    # value.
    b_factors: np.ndarray  # [num_res, num_atom_type]

    # Chain indices for multi-chain predictions
    chain_index: Optional[np.ndarray] = None

    # Optional remark about the protein. Included as a comment in output PDB
    # files
    remark: Optional[str] = None

    # Templates used to generate this protein (prediction-only)
    parents: Optional[Sequence[str]] = None

    # Chain corresponding to each parent
    parents_chain_index: Optional[Sequence[int]] = None
def from_proteinnet_string(proteinnet_str: str) -> Protein:
    tag_re = r"(\[[A-Z]+\]\n)"
    tags = [tag.strip() for tag in re.split(tag_re, proteinnet_str) if len(tag) > 0]
    groups: Iterator[Tuple[str, List[str]]] = zip(tags[0::2], [l.split("\n") for l in tags[1::2]])

    atoms: List[str] = ["N", "CA", "C"]
    aatype = None
    atom_positions = None
    atom_mask = None
    for g in groups:
        if "[PRIMARY]" == g[0]:
            seq = g[1][0].strip()
            for i in range(len(seq)):
                if seq[i] not in residue_constants.restypes:
                    # Map any non-standard residue symbol to the unknown residue "X".
                    seq = seq[:i] + "X" + seq[i + 1 :]
            aatype = np.array(
                [residue_constants.restype_order.get(res_symbol, residue_constants.restype_num) for res_symbol in seq]
            )
        elif "[TERTIARY]" == g[0]:
            tertiary: List[List[float]] = []
            for axis in range(3):
                tertiary.append(list(map(float, g[1][axis].split())))
            tertiary_np = np.array(tertiary)
            atom_positions = np.zeros((len(tertiary[0]) // 3, residue_constants.atom_type_num, 3)).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_positions[:, residue_constants.atom_order[atom]] = np.transpose(tertiary_np[:, i::3])
            atom_positions *= PICO_TO_ANGSTROM
        elif "[MASK]" == g[0]:
            mask = np.array(list(map({"-": 0, "+": 1}.get, g[1][0].strip())))
            atom_mask = np.zeros(
                (
                    len(mask),
                    residue_constants.atom_type_num,
                )
            ).astype(np.float32)
            for i, atom in enumerate(atoms):
                atom_mask[:, residue_constants.atom_order[atom]] = 1
            atom_mask *= mask[..., None]

    assert aatype is not None

    return Protein(
        atom_positions=atom_positions,
        atom_mask=atom_mask,
        aatype=aatype,
        residue_index=np.arange(len(aatype)),
        b_factors=None,
    )
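# For orientation, a hedged, abbreviated sketch of the ProteinNet text layout the
# parser above expects -- bracketed tag lines, each followed by its payload:
#
#     [PRIMARY]
#     MKTAYIAK...
#     [TERTIARY]
#     <x coordinates, space-separated, in picometers>
#     <y coordinates>
#     <z coordinates>
#     [MASK]
#     ++++----++++...
#
# Only the N, CA and C backbone atoms are populated; coordinates are converted
# from picometers to angstroms via PICO_TO_ANGSTROM above.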
def get_pdb_headers(prot: Protein, chain_id: int = 0) -> List[str]:
    pdb_headers: List[str] = []

    remark = prot.remark
    if remark is not None:
        pdb_headers.append(f"REMARK {remark}")

    parents = prot.parents
    parents_chain_index = prot.parents_chain_index
    if parents is not None and parents_chain_index is not None:
        parents = [p for i, p in zip(parents_chain_index, parents) if i == chain_id]

    if parents is None or len(parents) == 0:
        parents = ["N/A"]

    pdb_headers.append(f"PARENT {' '.join(parents)}")

    return pdb_headers
def add_pdb_headers(prot: Protein, pdb_str: str) -> str:
    """Add pdb headers to an existing PDB string. Useful during multi-chain recycling."""
    out_pdb_lines: List[str] = []
    lines = pdb_str.split("\n")

    remark = prot.remark
    if remark is not None:
        out_pdb_lines.append(f"REMARK {remark}")

    parents_per_chain: List[List[str]]
    if prot.parents is not None and len(prot.parents) > 0:
        parents_per_chain = []
        if prot.parents_chain_index is not None:
            parent_dict: Dict[str, List[str]] = {}
            for p, i in zip(prot.parents, prot.parents_chain_index):
                parent_dict.setdefault(str(i), [])
                parent_dict[str(i)].append(p)

            max_idx = max([int(chain_idx) for chain_idx in parent_dict])
            for i in range(max_idx + 1):
                chain_parents = parent_dict.get(str(i), ["N/A"])
                parents_per_chain.append(chain_parents)
        else:
            parents_per_chain.append(list(prot.parents))
    else:
        parents_per_chain = [["N/A"]]

    def make_parent_line(p: Sequence[str]) -> str:
        return f"PARENT {' '.join(p)}"

    out_pdb_lines.append(make_parent_line(parents_per_chain[0]))

    chain_counter = 0
    for i, l in enumerate(lines):
        if "PARENT" not in l and "REMARK" not in l:
            out_pdb_lines.append(l)
        if "TER" in l and "END" not in lines[i + 1]:
            chain_counter += 1
            if not chain_counter >= len(parents_per_chain):
                chain_parents = parents_per_chain[chain_counter]
            else:
                chain_parents = ["N/A"]

            out_pdb_lines.append(make_parent_line(chain_parents))

    return "\n".join(out_pdb_lines)
def to_pdb(prot: Protein) -> str:
    """Converts a `Protein` instance to a PDB string."""
    restypes = residue_constants.restypes + ["X"]

    def res_1to3(r: int) -> str:
        return residue_constants.restype_1to3.get(restypes[r], "UNK")

    atom_types = residue_constants.atom_types

    pdb_lines: List[str] = []

    atom_mask = prot.atom_mask
    aatype = prot.aatype
    atom_positions = prot.atom_positions
    residue_index = prot.residue_index.astype(np.int32)
    b_factors = prot.b_factors
    chain_index = prot.chain_index

    if np.any(aatype > residue_constants.restype_num):
        raise ValueError("Invalid aatypes.")

    headers = get_pdb_headers(prot)
    if len(headers) > 0:
        pdb_lines.extend(headers)

    n = aatype.shape[0]
    atom_index = 1
    prev_chain_index = 0
    chain_tags = string.ascii_uppercase
    chain_tag = None
    # Add all atom sites.
    for i in range(n):
        res_name_3 = res_1to3(aatype[i])
        for atom_name, pos, mask, b_factor in zip(atom_types, atom_positions[i], atom_mask[i], b_factors[i]):
            if mask < 0.5:
                continue

            record_type = "ATOM"
            name = atom_name if len(atom_name) == 4 else f" {atom_name}"
            alt_loc = ""
            insertion_code = ""
            occupancy = 1.00
            element = atom_name[0]  # Protein supports only C, N, O, S, this works.
            charge = ""

            chain_tag = "A"
            if chain_index is not None:
                chain_tag = chain_tags[chain_index[i]]

            # PDB is a columnar format, every space matters here!
            atom_line = (
                f"{record_type:<6}{atom_index:>5} {name:<4}{alt_loc:>1}"
                f"{res_name_3:>3} {chain_tag:>1}"
                f"{residue_index[i]:>4}{insertion_code:>1}   "
                f"{pos[0]:>8.3f}{pos[1]:>8.3f}{pos[2]:>8.3f}"
                f"{occupancy:>6.2f}{b_factor:>6.2f}          "
                f"{element:>2}{charge:>2}"
            )
            pdb_lines.append(atom_line)
            atom_index += 1

        should_terminate = i == n - 1
        if chain_index is not None:
            if i != n - 1 and chain_index[i + 1] != prev_chain_index:
                should_terminate = True
                prev_chain_index = chain_index[i + 1]

        if should_terminate:
            # Close the chain.
            chain_end = "TER"
            chain_termination_line = (
                f"{chain_end:<6}{atom_index:>5}      {res_1to3(aatype[i]):>3} {chain_tag:>1}{residue_index[i]:>4}"
            )
            pdb_lines.append(chain_termination_line)
            atom_index += 1

            if i != n - 1:
                # "prev" is a misnomer here. This happens at the beginning of
                # each new chain.
                pdb_lines.extend(get_pdb_headers(prot, prev_chain_index))

    pdb_lines.append("END")
    pdb_lines.append("")
    return "\n".join(pdb_lines)
def ideal_atom_mask(prot: Protein) -> np.ndarray:
    """Computes an ideal atom mask from the standard atoms of each residue type,
    ignoring whether atoms were actually resolved."""
    return residue_constants.STANDARD_ATOM_MASK[prot.aatype]


def from_prediction(
    features: FeatureDict,
    result: ModelOutput,
    b_factors: Optional[np.ndarray] = None,
    chain_index: Optional[np.ndarray] = None,
    remark: Optional[str] = None,
    parents: Optional[Sequence[str]] = None,
    parents_chain_index: Optional[Sequence[int]] = None,
) -> Protein:
    """Assembles a `Protein` from a model prediction."""
    return Protein(
        aatype=features["aatype"],
        atom_positions=result["final_atom_positions"],
        atom_mask=result["final_atom_mask"],
        residue_index=features["residue_index"] + 1,
        b_factors=b_factors if b_factors is not None else np.zeros_like(result["final_atom_mask"]),
        chain_index=chain_index,
        remark=remark,
        parents=parents,
        parents_chain_index=parents_chain_index,
    )
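# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal round trip, assuming `features` and `result` follow the key layout
# that from_prediction() expects; all variable names below are hypothetical.
#
#     prot = from_prediction(features=features, result=result, remark="demo")
#     pdb_string = to_pdb(prot)
#     with open("prediction.pdb", "w") as f:
#         f.write(add_pdb_headers(prot, pdb_string))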
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the reduced Planck constant ℏ (h-bar) and the speed of light c
REDUCED_PLANCK_CONSTANT = 1.054571817e-34  # unit of ℏ : J * s
SPEED_OF_LIGHT = 3e8  # unit of c : m * s^-1
def casimir_force(force: float, area: float, distance: float) -> dict[str, float]:
    """
    Given two of the three quantities (force, area, distance), with the missing
    one passed as 0, solve the Casimir equation F = (ℏ c π² A) / (240 d⁴) for
    the missing quantity and return it in a dict.
    """
    if (force, area, distance).count(0) != 1:
        raise ValueError("One and only one argument must be 0")
    if force < 0:
        raise ValueError("Magnitude of force can not be negative")
    if distance < 0:
        raise ValueError("Distance can not be negative")
    if area < 0:
        raise ValueError("Area can not be negative")
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError("One and only one argument must be 0")
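# Worked example (added as a hedged sanity check, not from the original file):
# for two 1 m^2 plates separated by d = 1 µm,
#     F = (ℏ · c · π² · A) / (240 · d⁴)
#       = (1.054571817e-34 * 3e8 * π² * 1) / (240 * (1e-6) ** 4) ≈ 1.3e-3 N,
# i.e. the familiar ~1.3 mPa Casimir pressure at micron separations:
#
#     casimir_force(force=0, area=1.0, distance=1e-6)  # -> {'force': ~0.0013}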
# Run doctest
if __name__ == "__main__":
import doctest
    doctest.testmod()
from typing import Any, Dict, Optional
import torch
import torch.nn.functional as F
from torch import nn
from ..utils import maybe_allow_in_graph
from .activations import get_activation
from .attention_processor import Attention
from .embeddings import CombinedTimestepLabelEmbeddings
@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""A basic Transformer block: self-attention, optional cross-attention, and a feed-forward layer."""

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        final_dropout: bool = False,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine)
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn, final_dropout=final_dropout)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states,
        attention_mask=None,
        encoder_hidden_states=None,
        encoder_attention_mask=None,
        timestep=None,
        cross_attention_kwargs=None,
        class_labels=None,
    ):
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 1. Self-Attention
        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        else:
            norm_hidden_states = self.norm1(hidden_states)

        cross_attention_kwargs = cross_attention_kwargs if cross_attention_kwargs is not None else {}
        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        hidden_states = attn_output + hidden_states

        # 2. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep) if self.use_ada_layer_norm else self.norm2(hidden_states)
            )
            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 3. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            if norm_hidden_states.shape[self._chunk_dim] % self._chunk_size != 0:
                raise ValueError(
                    f"`hidden_states` dimension to be chunked: {norm_hidden_states.shape[self._chunk_dim]} has to be divisible by chunk size: {self._chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
                )

            num_chunks = norm_hidden_states.shape[self._chunk_dim] // self._chunk_size
            ff_output = torch.cat(
                [self.ff(hid_slice) for hid_slice in norm_hidden_states.chunk(num_chunks, dim=self._chunk_dim)],
                dim=self._chunk_dim,
            )
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output

        hidden_states = ff_output + hidden_states

        return hidden_states
class FeedForward(nn.Module):
    r"""A feed-forward layer."""

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
    ):
        super().__init__()
        inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim)
        if activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh")
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(nn.Linear(inner_dim, dim_out))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states):
        for module in self.net:
            hidden_states = module(hidden_states)
        return hidden_states
class GELU(nn.Module):
    r"""GELU activation function with tanh approximation support via `approximate="tanh"`."""

    def __init__(self, dim_in: int, dim_out: int, approximate: str = "none"):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)
        self.approximate = approximate

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate, approximate=self.approximate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32), approximate=self.approximate).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states = self.proj(hidden_states)
        hidden_states = self.gelu(hidden_states)
        return hidden_states
class GEGLU(nn.Module):
    r"""A variant of the gated linear unit activation function from https://arxiv.org/abs/2002.05202."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out * 2)

    def gelu(self, gate):
        if gate.device.type != "mps":
            return F.gelu(gate)
        # mps: gelu is not implemented for float16
        return F.gelu(gate.to(dtype=torch.float32)).to(dtype=gate.dtype)

    def forward(self, hidden_states):
        hidden_states, gate = self.proj(hidden_states).chunk(2, dim=-1)
        return hidden_states * self.gelu(gate)
class ApproximateGELU(nn.Module):
    r"""The approximate form of the Gaussian Error Linear Unit (GELU), see https://arxiv.org/abs/1606.08415."""

    def __init__(self, dim_in: int, dim_out: int):
        super().__init__()
        self.proj = nn.Linear(dim_in, dim_out)

    def forward(self, x):
        x = self.proj(x)
        return x * torch.sigmoid(1.702 * x)
class AdaLayerNorm(nn.Module):
    r"""Norm layer modified to incorporate timestep embeddings."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = nn.Embedding(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, embedding_dim * 2)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False)

    def forward(self, x, timestep):
        emb = self.linear(self.silu(self.emb(timestep)))
        scale, shift = torch.chunk(emb, 2)
        x = self.norm(x) * (1 + scale) + shift
        return x
class AdaLayerNormZero(nn.Module):
    r"""Norm layer with adaptive layer norm zero (adaLN-Zero) modulation."""

    def __init__(self, embedding_dim: int, num_embeddings: int):
        super().__init__()
        self.emb = CombinedTimestepLabelEmbeddings(num_embeddings, embedding_dim)
        self.silu = nn.SiLU()
        self.linear = nn.Linear(embedding_dim, 6 * embedding_dim, bias=True)
        self.norm = nn.LayerNorm(embedding_dim, elementwise_affine=False, eps=1e-6)

    def forward(self, x, timestep, class_labels, hidden_dtype=None):
        emb = self.linear(self.silu(self.emb(timestep, class_labels, hidden_dtype=hidden_dtype)))
        shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = emb.chunk(6, dim=1)
        x = self.norm(x) * (1 + scale_msa[:, None]) + shift_msa[:, None]
        return x, gate_msa, shift_mlp, scale_mlp, gate_mlp
class AdaGroupNorm(nn.Module):
    r"""GroupNorm layer modified to incorporate timestep embeddings."""

    def __init__(
        self, embedding_dim: int, out_dim: int, num_groups: int, act_fn: Optional[str] = None, eps: float = 1e-5
    ):
        super().__init__()
        self.num_groups = num_groups
        self.eps = eps

        if act_fn is None:
            self.act = None
        else:
            self.act = get_activation(act_fn)

        self.linear = nn.Linear(embedding_dim, out_dim * 2)

    def forward(self, x, emb):
        if self.act:
            emb = self.act(emb)
        emb = self.linear(emb)
        emb = emb[:, :, None, None]
        scale, shift = emb.chunk(2, dim=1)

        x = F.group_norm(x, self.num_groups, eps=self.eps)
        x = x * (1 + scale) + shift
        return x
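# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Shapes and hyperparameters below are hypothetical; a BasicTransformerBlock
# consumes (batch, sequence, dim) activations and returns the same shape:
#
#     block = BasicTransformerBlock(dim=320, num_attention_heads=8, attention_head_dim=40)
#     hidden_states = torch.randn(2, 64, 320)
#     out = block(hidden_states)  # shape (2, 64, 320)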
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol2 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
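# Note on the two tuples that follow (inferred from the data, so treat this as
# a hedged reading): TEST_KIND maps each hand to its best repeated-card kind
# (0 = none, 1 = pair, 2 = two pairs, 3 = three of a kind, ... 7 = four of a
# kind), while TEST_TYPES maps each hand to its overall hand rank
# (14 = high card up to 23 = royal flush).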
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
)
TEST_TYPES = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
)


def generate_random_hand():
    play, oppo = randrange(len(SORTED_HANDS)), randrange(len(SORTED_HANDS))
    expected = ["Loss", "Tie", "Win"][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected


def generate_random_hands(number_of_hands: int = 100):
    return (generate_random_hand() for _ in range(number_of_hands))


@pytest.mark.parametrize("hand, expected", TEST_FLUSH)
def test_hand_is_flush(hand, expected):
    assert PokerHand(hand)._is_flush() == expected


@pytest.mark.parametrize("hand, expected", TEST_STRAIGHT)
def test_hand_is_straight(hand, expected):
    assert PokerHand(hand)._is_straight() == expected


@pytest.mark.parametrize("hand, expected, card_values", TEST_FIVE_HIGH_STRAIGHT)
def test_hand_is_five_high_straight(hand, expected, card_values):
    player = PokerHand(hand)
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values


@pytest.mark.parametrize("hand, expected", TEST_KIND)
def test_hand_is_same_kind(hand, expected):
    assert PokerHand(hand)._is_same_kind() == expected


@pytest.mark.parametrize("hand, expected", TEST_TYPES)
def test_hand_values(hand, expected):
    assert PokerHand(hand)._hand_type == expected


@pytest.mark.parametrize("hand, other, expected", TEST_COMPARE)
def test_compare_simple(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


@pytest.mark.parametrize("hand, other, expected", generate_random_hands())
def test_compare_random(hand, other, expected):
    assert PokerHand(hand).compare_with(PokerHand(other)) == expected


def test_hand_sorted():
    poker_hands = [PokerHand(hand) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy)
    user_sorted = chain(sorted(list_copy))
    for index, hand in enumerate(user_sorted):
        assert hand == poker_hands[index]


def test_custom_sort_five_high_straight():
    # A five-high straight must sort below a six-high straight.
    pokerhands = [PokerHand("2D AC 3H 4H 5S"), PokerHand("2S 3H 4H 5S 6C")]
    pokerhands.sort(reverse=True)
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"


def test_multiple_calls_five_high_straight():
    # Repeated calls should return the same result and must not mutate the
    # card values after the first call.
    pokerhand = PokerHand("2C 4S AS 3D 5C")
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values


def test_euler_project():
    # Problem 54 from Project Euler: count the hands Player 1 wins in
    # poker_hands.txt.
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__))
    poker_hands = os.path.join(script_dir, "poker_hands.txt")
    with open(poker_hands) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand), PokerHand(opponent_hand)
            output = player.compare_with(opponent)
            if output == "Win":
                answer += 1
    assert answer == 376
import unittest
from transformers import GPTNeoXJapaneseConfig, is_torch_available
from transformers.models.gpt_neox_japanese.tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import GPTNeoXJapaneseForCausalLM, GPTNeoXJapaneseModel
class GPTNeoXJapaneseModelTester:
    def __init__(
        self, parent, batch_size=13, seq_length=7, is_training=True, use_input_mask=True, use_token_type_ids=True,
        use_labels=True, vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4,
        intermediate_multiple_size=4, hidden_act="gelu", hidden_dropout=0.0, attention_dropout=0.1,
        weight_tying=True, max_position_embeddings=512, type_vocab_size=16, type_sequence_label_size=2,
        initializer_range=0.02, num_labels=3, num_choices=4, scope=None,
    ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_multiple_size = intermediate_multiple_size
        self.hidden_act = hidden_act
        self.hidden_dropout = hidden_dropout
        self.attention_dropout = attention_dropout
        self.weight_tying = weight_tying
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_labels = None
        if self.use_labels:
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)

        config = self.get_config()

        return config, input_ids, input_mask, token_labels
    def get_config(self):
        return GPTNeoXJapaneseConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_multiple_size=self.intermediate_multiple_size,
            hidden_act=self.hidden_act,
            hidden_dropout=self.hidden_dropout,
            attention_dropout=self.attention_dropout,
            weight_tying=self.weight_tying,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            is_decoder=False,
            initializer_range=self.initializer_range,
        )

    def prepare_config_and_inputs_for_decoder(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        config.is_decoder = True
        return config, input_ids, input_mask, token_labels

    def create_and_check_model(self, config, input_ids, input_mask):
        model = GPTNeoXJapaneseModel(config=config)
        model.to(torch_device)
        model.eval()
        _ = model(input_ids, attention_mask=input_mask)
        result = model(input_ids)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_model_as_decoder(self, config, input_ids, input_mask):
        config.add_cross_attention = True
        model = GPTNeoXJapaneseModel(config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask)
        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_causal_lm(self, config, input_ids, input_mask, token_labels):
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()
        result = model(input_ids, attention_mask=input_mask, labels=token_labels)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_decoder_model_past_large_inputs(self, config, input_ids, input_mask):
        config.is_decoder = True
        model = GPTNeoXJapaneseForCausalLM(config=config)
        model.to(torch_device)
        model.eval()

        # first forward pass
        outputs = model(input_ids, attention_mask=input_mask, use_cache=True)
        past_key_values = outputs.past_key_values

        # create hypothetical multiple next token and extent to next_input_ids
        next_tokens = ids_tensor((self.batch_size, 3), config.vocab_size)
        next_mask = ids_tensor((self.batch_size, 3), vocab_size=2)

        # append to next input_ids and
        next_input_ids = torch.cat([input_ids, next_tokens], dim=-1)
        next_attention_mask = torch.cat([input_mask, next_mask], dim=-1)

        output_from_no_past = model(next_input_ids, attention_mask=next_attention_mask, output_hidden_states=True)
        output_from_no_past = output_from_no_past["hidden_states"][0]
        output_from_past = model(
            next_tokens,
            attention_mask=next_attention_mask,
            past_key_values=past_key_values,
            output_hidden_states=True,
        )["hidden_states"][0]

        # select random slice
        random_slice_idx = ids_tensor((1,), output_from_past.shape[-1]).item()
        output_from_no_past_slice = output_from_no_past[:, -3:, random_slice_idx].detach()
        output_from_past_slice = output_from_past[:, :, random_slice_idx].detach()

        self.parent.assertTrue(output_from_past_slice.shape[1] == next_tokens.shape[1])

        # test that outputs are equal for slice
        self.parent.assertTrue(torch.allclose(output_from_no_past_slice, output_from_past_slice, atol=1e-3))

    def prepare_config_and_inputs_for_common(self):
        config, input_ids, input_mask, token_labels = self.prepare_config_and_inputs()
        inputs_dict = {"input_ids": input_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_torch
class GPTNeoXModelJapaneseTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (GPTNeoXJapaneseModel, GPTNeoXJapaneseForCausalLM) if is_torch_available() else ()
    all_generative_model_classes = (GPTNeoXJapaneseForCausalLM,) if is_torch_available() else ()
    pipeline_model_mapping = (
        {"feature-extraction": GPTNeoXJapaneseModel, "text-generation": GPTNeoXJapaneseForCausalLM}
        if is_torch_available()
        else {}
    )
    test_pruning = False
    test_missing_keys = False
    test_model_parallel = False
    test_head_masking = False
    def setUp(self):
        self.model_tester = GPTNeoXJapaneseModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPTNeoXJapaneseConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(config, input_ids, input_mask)

    def test_model_as_decoder(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_model_as_decoder_with_default_input_mask(self):
        # This regression test was failing with PyTorch < 1.3
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs_for_decoder()
        input_mask = None
        self.model_tester.create_and_check_model_as_decoder(config, input_ids, input_mask)

    def test_decoder_model_past_large_inputs(self):
        config, input_ids, input_mask, token_labels = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_decoder_model_past_large_inputs(config, input_ids, input_mask)

    def test_model_for_causal_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_causal_lm(*config_and_inputs)
@slow
    def test_generation(self):
        model_id = "abeja/gpt-neox-japanese-2.7b"
        prompts = ["データサイエンティストとは、", "100年後に必要とされる会社は、", "フルリモートの環境で働くために必要なことは、", "国境の長いトンネルを抜けると", "美味しい日本食といえば、"]
        EXPECTED_OUTPUTS = [
            "データサイエンティストとは、データを分析し、ビジネスに役立つ知見を導き出す専門家のことです。",
            "100年後に必要とされる会社は、「人」が中心の会社です。",
            "フルリモートの環境で働くために必要なことは、「自分の時間をコントロールする」ことです。",
            "国境の長いトンネルを抜けると、そこは雪国だった。",
            "美味しい日本食といえば、やっぱりお寿司ですよね。",
        ]

        tokenizer = GPTNeoXJapaneseTokenizer.from_pretrained(model_id)
        model = GPTNeoXJapaneseForCausalLM.from_pretrained(model_id)

        predicted_outputs = []
        for prompt in prompts:
            input_ids = tokenizer(prompt, return_tensors="pt").input_ids
            generated_ids = model.generate(input_ids, max_length=50)
            generated_string = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)
            predicted_outputs += generated_string
        self.assertListEqual(predicted_outputs, EXPECTED_OUTPUTS)
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    "configuration_vision_encoder_decoder": ["VisionEncoderDecoderConfig", "VisionEncoderDecoderOnnxConfig"]
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_vision_encoder_decoder"] = ["VisionEncoderDecoderModel"]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_vision_encoder_decoder"] = ["TFVisionEncoderDecoderModel"]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_vision_encoder_decoder"] = ["FlaxVisionEncoderDecoderModel"]
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
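# Note (added for clarity, not part of the original file): with this pattern,
# framework-specific submodules are only imported on first attribute access.
# A hedged sketch of the effect, assuming this package lives at the usual
# transformers path:
#
#     import transformers.models.vision_encoder_decoder as ved  # cheap
#     ved.VisionEncoderDecoderModel  # triggers the torch-backed import lazily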
import pytest

import datasets

# Import fixture modules as plugins
pytest_plugins = ["tests.fixtures.files", "tests.fixtures.hub", "tests.fixtures.fsspec"]


def pytest_collection_modifyitems(config, items):
    # Mark tests as "unit" by default if not marked as "integration" (or already marked as "unit")
    for item in items:
        if any(marker in item.keywords for marker in ["integration", "unit"]):
            continue
        item.add_marker(pytest.mark.unit)


def pytest_configure(config):
    config.addinivalue_line("markers", "torchaudio_latest: mark test to run with torchaudio>=0.12")


@pytest.fixture(autouse=True)
def set_test_cache_config(tmp_path_factory, monkeypatch):
    # test_hf_cache_home = tmp_path_factory.mktemp("cache")  # TODO: why a cache dir per test function does not work?
    test_hf_cache_home = tmp_path_factory.getbasetemp() / "cache"
    test_hf_datasets_cache = test_hf_cache_home / "datasets"
    test_hf_metrics_cache = test_hf_cache_home / "metrics"
    test_hf_modules_cache = test_hf_cache_home / "modules"
    monkeypatch.setattr("datasets.config.HF_DATASETS_CACHE", str(test_hf_datasets_cache))
    monkeypatch.setattr("datasets.config.HF_METRICS_CACHE", str(test_hf_metrics_cache))
    monkeypatch.setattr("datasets.config.HF_MODULES_CACHE", str(test_hf_modules_cache))
    test_downloaded_datasets_path = test_hf_datasets_cache / "downloads"
    monkeypatch.setattr("datasets.config.DOWNLOADED_DATASETS_PATH", str(test_downloaded_datasets_path))
    test_extracted_datasets_path = test_hf_datasets_cache / "downloads" / "extracted"
    monkeypatch.setattr("datasets.config.EXTRACTED_DATASETS_PATH", str(test_extracted_datasets_path))


@pytest.fixture(autouse=True, scope="session")
def disable_tqdm_output():
    datasets.disable_progress_bar()


@pytest.fixture(autouse=True)
def set_update_download_counts_to_false(monkeypatch):
    # don't take tests into account when counting downloads
    monkeypatch.setattr("datasets.config.HF_UPDATE_DOWNLOAD_COUNTS", False)


@pytest.fixture
def set_sqlalchemy_silence_uber_warning(monkeypatch):
    # Required to suppress RemovedIn20Warning when feature(s) are not compatible with SQLAlchemy 2.0
    # To be removed once SQLAlchemy 2.0 supported
    monkeypatch.setattr("sqlalchemy.util.deprecations.SILENCE_UBER_WARNING", True)
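# Usage note (added for clarity; not part of the original conftest): the
# `autouse=True` fixtures above apply to every collected test automatically,
# while the SQLAlchemy fixture is opt-in -- a hypothetical test enables it by
# naming it as an argument:
#
#     def test_sql_roundtrip(set_sqlalchemy_silence_uber_warning):
#         ...  # runs with the SQLAlchemy deprecation warning silenced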
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """
    Zero-shot image classification pipeline: predicts the most likely of a set of `candidate_labels` for an image.
    """

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, "vision")
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == "tf"
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )

    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)
    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if "candidate_labels" in kwargs:
            preprocess_params["candidate_labels"] = kwargs["candidate_labels"]
        if "hypothesis_template" in kwargs:
            preprocess_params["hypothesis_template"] = kwargs["hypothesis_template"]

        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs["candidate_labels"] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs["text_inputs"] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop("candidate_labels")
        text_inputs = model_inputs.pop("text_inputs")
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**text_inputs, **model_inputs)

        model_outputs = {
            "candidate_labels": candidate_labels,
            "logits": outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop("candidate_labels")
        logits = model_outputs["logits"][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(f"Unsupported framework: {self.framework}")

        result = [
            {"score": score, "label": candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
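# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# The image path and label set below are hypothetical examples:
#
#     from transformers import pipeline
#
#     classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#     classifier("cat.jpg", candidate_labels=["cat", "dog", "bird"])
#     # -> [{"score": ..., "label": "cat"}, ...] sorted by descending score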
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
"""iou_prediction_head.layers.0""": """iou_prediction_head.proj_in""",
"""iou_prediction_head.layers.1""": """iou_prediction_head.layers.0""",
"""iou_prediction_head.layers.2""": """iou_prediction_head.proj_out""",
"""mask_decoder.output_upscaling.0""": """mask_decoder.upscale_conv1""",
"""mask_decoder.output_upscaling.1""": """mask_decoder.upscale_layer_norm""",
"""mask_decoder.output_upscaling.3""": """mask_decoder.upscale_conv2""",
"""mask_downscaling.0""": """mask_embed.conv1""",
"""mask_downscaling.1""": """mask_embed.layer_norm1""",
"""mask_downscaling.3""": """mask_embed.conv2""",
"""mask_downscaling.4""": """mask_embed.layer_norm2""",
"""mask_downscaling.6""": """mask_embed.conv3""",
"""point_embeddings""": """point_embed""",
"""pe_layer.positional_encoding_gaussian_matrix""": """shared_embedding.positional_embedding""",
"""image_encoder""": """vision_encoder""",
"""neck.0""": """neck.conv1""",
"""neck.1""": """neck.layer_norm1""",
"""neck.2""": """neck.conv2""",
"""neck.3""": """neck.layer_norm2""",
"""patch_embed.proj""": """patch_embed.projection""",
""".norm""": """.layer_norm""",
"""blocks""": """layers""",
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("pixel_mean", None)
    state_dict.pop("pixel_std", None)

    output_hypernetworks_mlps_pattern = r".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"

    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify, new_key)

        if re.match(output_hypernetworks_mlps_pattern, key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern, key).group(2))
            if layer_nb == 0:
                key = key.replace("layers.0", "proj_in")
            elif layer_nb == 1:
                key = key.replace("layers.1", "layers.0")
            elif layer_nb == 2:
                key = key.replace("layers.2", "proj_out")

        model_state_dict[key] = value

    model_state_dict["shared_image_embedding.positional_embedding"] = model_state_dict[
        "prompt_encoder.shared_embedding.positional_embedding"
    ]

    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id, f"checkpoints/{model_name}.pth")

    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_024, num_hidden_layers=24, num_attention_heads=16, global_attn_indexes=[5, 11, 17, 23],
        )
        config = SamConfig(vision_config=vision_config)
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1_280, num_hidden_layers=32, num_attention_heads=16, global_attn_indexes=[7, 15, 23, 31],
        )
        config = SamConfig(vision_config=vision_config)

    state_dict = torch.load(checkpoint_path, map_location="cpu")
    state_dict = replace_keys(state_dict)

    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("cuda")

    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
    raw_image = Image.open(requests.get(img_url, stream=True).raw).convert("RGB")
    input_points = [[[400, 650]]]
    input_labels = [[1]]

    inputs = processor(images=np.array(raw_image), return_tensors="pt").to("cuda")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()

    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579890251159668

        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712603092193604

        input_boxes = ((75, 275, 1_725, 850),)
        inputs = processor(images=np.array(raw_image), input_boxes=input_boxes, return_tensors="pt").to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686015605926514

        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image), input_points=input_points, input_labels=input_labels, return_tensors="pt"
        ).to("cuda")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936047792434692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ["sam_vit_b_01ec64", "sam_vit_h_4b8939", "sam_vit_l_0b3195"]
parser.add_argument(
"""--model_name""",
default="""sam_vit_h_4b8939""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
parser.add_argument("""--pytorch_dump_folder_path""", default=None, type=str, help="""Path to the output PyTorch model.""")
parser.add_argument(
"""--push_to_hub""",
action="""store_true""",
help="""Whether to push the model and processor to the hub after converting""",
)
parser.add_argument(
"""--model_hub_id""",
default="""ybelkada/segment-anything""",
choices=choices,
type=str,
help="""Path to hf config.json of model to convert""",
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
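# Example invocation (hedged; the flag names come from the parser above, while
# the script filename and output path are hypothetical):
#
#     python convert_sam_to_hf.py --model_name sam_vit_b_01ec64 \
#         --pytorch_dump_folder_path ./sam-vit-base --push_to_hub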
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByTaTokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = "pt"
elif is_tf_available():
    FRAMEWORK = "tf"
else:
    FRAMEWORK = "jax"
class ByTaTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByTaTokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByTaTokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def ta_base_tokenizer(self):
        return ByTaTokenizer.from_pretrained("google/byt5-small")

    def get_tokenizer(self, **kwargs) -> ByTaTokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                pass
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r"^[ a-zA-Z]+$", t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + " "
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = " " + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.ta_base_tokenizer
        batch_with_eos_added = tokenizer(["hi</s>", "I went to the gym</s>", "</s>"])
        batch_without_eos_added = tokenizer(["hi", "I went to the gym", ""])
        self.assertListEqual(batch_with_eos_added["input_ids"], batch_without_eos_added["input_ids"])
    def test_multibytes_char(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'Unicode €.</s>')

        encoded = tokenizer('e è é ê ë')
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'e è é ê ë</s>')

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), 'e è é ê ë</s>')
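    def test_byte_id_offset_sketch(self) -> None:
        # Illustrative sketch, not part of the upstream suite. It assumes ByT5's id scheme:
        # id = utf-8 byte value + 3, with ids 0/1/2 reserved for pad/eos/unk. The expected
        # ids in test_multibytes_char above are consistent with this assumption
        # (ord('U') == 85 and the first id for 'Unicode €.' is 88).
        tokenizer = self.ta_base_tokenizer
        expected = [byte + 3 for byte in 'Unicode €.'.encode('utf-8')] + [1]  # trailing eos id
        self.assertEqual(tokenizer('Unicode €.')['input_ids'], expected)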
    def test_prepare_batch_integration(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self) -> None:
        tokenizer = self.ta_base_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK)
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_eos_in_input(self) -> None:
        tokenizer = self.ta_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch['input_ids'][0])
        self.assertEqual(expected_tgt_tokens, batch['labels'][0])
    def test_save_and_load_tokenizer(self) -> None:
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self) -> None:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [f'<extra_id_{i}>' for i in range(125)]

                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(
                    tmp_dir, additional_special_tokens=new_added_tokens)

                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])),
                )
    def test_decode_single_bytes(self) -> None:
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == '')
    # tokenizer can be instantiated without any pretrained files, so no need for pretrained tokenizer list
    def test_pretrained_model_lists(self) -> None:
        pass

    # tokenizer does not have vocabulary
    def test_get_vocab(self) -> None:
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self) -> None:
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self) -> None:
        pass
    def test_convert_tokens_to_string_format(self) -> None:
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    # We need a different implementation of the test of the same name defined in
    # TokenizerTesterMixin because this tokenizer doesn't have a vocab.
    def test_tokenizers_common_ids_setters(self) -> None:
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(f'{tokenizer.__class__.__name__}'):
                attributes_list = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)

                for attr in attributes_list:
                    setattr(tokenizer, attr + '_id', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), None)

                    setattr(tokenizer, attr + '_id', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), token_id_to_test_setters)

                setattr(tokenizer, 'additional_special_tokens_ids', [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [])

                setattr(tokenizer, 'additional_special_tokens_ids', [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [token_id_to_test_setters])
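# Minimal standalone sketch (assumes hub access for google/byt5-small): ByT5 operates
# directly on utf-8 bytes, so it needs no vocabulary file and every string round-trips:
#
#   tok = ByTaTokenizer.from_pretrained('google/byt5-small')
#   assert tok('hi')['input_ids'] == [107, 108, 1]   # ord('h') + 3, ord('i') + 3, eos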
import gc
import math
import unittest
import torch
from diffusers import UNetaDModel
from diffusers.utils import floats_tensor, logging, slow, torch_all_close, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from .test_modeling_common import ModelTesterMixin, UNetTesterMixin
SCREAMING_SNAKE_CASE :int = logging.get_logger(__name__)
enable_full_determinism()
class UNetaDModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": (32, 64),
            "down_block_types": ("DownBlock2D", "AttnDownBlock2D"),
            "up_block_types": ("AttnUpBlock2D", "UpBlock2D"),
            "attention_head_dim": 3,
            "out_channels": 3,
            "in_channels": 3,
            "layers_per_block": 2,
            "sample_size": 32,
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
class UNetLDMModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = "sample"

    @property
    def dummy_input(self):
        batch_size = 4
        num_channels = 4
        sizes = (32, 32)

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor([10]).to(torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (4, 32, 32)

    @property
    def output_shape(self):
        return (4, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "sample_size": 32,
            "in_channels": 4,
            "out_channels": 4,
            "layers_per_block": 2,
            "block_out_channels": (32, 64),
            "attention_head_dim": 32,
            "down_block_types": ("DownBlock2D", "DownBlock2D"),
            "up_block_types": ("UpBlock2D", "UpBlock2D"),
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"
    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate(self):
        model, _ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model.to(torch_device)
        image = model(**self.dummy_input).sample

        assert image is not None, "Make sure output is not None"

    @unittest.skipIf(torch_device != "cuda", "This test is supposed to run on GPU")
    def test_from_pretrained_accelerate_wont_change_results(self):
        # by default model loading will use accelerate as `low_cpu_mem_usage=True`
        model_accelerate, _ = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update", output_loading_info=True)
        model_accelerate.to(torch_device)
        model_accelerate.eval()

        noise = torch.randn(
            1,
            model_accelerate.config.in_channels,
            model_accelerate.config.sample_size,
            model_accelerate.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        arr_accelerate = model_accelerate(noise, time_step)["sample"]

        # two models don't need to stay in the device at the same time
        del model_accelerate
        torch.cuda.empty_cache()
        gc.collect()

        model_normal_load, _ = UNetaDModel.from_pretrained(
            "fusing/unet-ldm-dummy-update", output_loading_info=True, low_cpu_mem_usage=False
        )
        model_normal_load.to(torch_device)
        model_normal_load.eval()
        arr_normal_load = model_normal_load(noise, time_step)["sample"]

        assert torch_all_close(arr_accelerate, arr_normal_load, rtol=1e-3)
    def test_output_pretrained(self):
        model = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update")
        model.eval()
        model.to(torch_device)

        noise = torch.randn(
            1,
            model.config.in_channels,
            model.config.sample_size,
            model.config.sample_size,
            generator=torch.manual_seed(0),
        )
        noise = noise.to(torch_device)
        time_step = torch.tensor([10] * noise.shape[0]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -1, -3:, -3:].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-13.3258, -20.1100, -15.9873, -17.6617, -23.0596, -17.9419, -13.3675, -16.1889, -12.3800])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-3))
class NCSNppModelTests(ModelTesterMixin, UNetTesterMixin, unittest.TestCase):
    model_class = UNetaDModel
    main_input_name = "sample"

    @property
    def dummy_input(self, sizes=(32, 32)):
        batch_size = 4
        num_channels = 3

        noise = floats_tensor((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [10]).to(dtype=torch.int32, device=torch_device)

        return {"sample": noise, "timestep": time_step}

    @property
    def input_shape(self):
        return (3, 32, 32)

    @property
    def output_shape(self):
        return (3, 32, 32)

    def prepare_init_args_and_inputs_for_common(self):
        init_dict = {
            "block_out_channels": [32, 64, 64, 64],
            "in_channels": 3,
            "layers_per_block": 1,
            "out_channels": 3,
            "time_embedding_type": "fourier",
            "norm_eps": 1e-6,
            "mid_block_scale_factor": math.sqrt(2.0),
            "norm_num_groups": None,
            "down_block_types": [
                "SkipDownBlock2D",
                "AttnSkipDownBlock2D",
                "SkipDownBlock2D",
                "SkipDownBlock2D",
            ],
            "up_block_types": [
                "SkipUpBlock2D",
                "SkipUpBlock2D",
                "AttnSkipUpBlock2D",
                "SkipUpBlock2D",
            ],
        }
        inputs_dict = self.dummy_input
        return init_dict, inputs_dict
    @slow
    def test_from_pretrained_hub(self):
        model, loading_info = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256", output_loading_info=True)
        self.assertIsNotNone(model)
        self.assertEqual(len(loading_info["missing_keys"]), 0)

        model.to(torch_device)
        inputs = self.dummy_input
        noise = floats_tensor((4, 3) + (256, 256)).to(torch_device)
        inputs["sample"] = noise
        image = model(**inputs)

        assert image is not None, "Make sure output is not None"

    @slow
    def test_output_pretrained_ve_mid(self):
        model = UNetaDModel.from_pretrained("google/ncsnpp-celebahq-256")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (256, 256)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-4842.8691, -6499.6631, -3800.1953, -7978.2686, -10980.7129, -20028.8535, 8148.2822, 2342.2905, 567.7608])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_output_pretrained_ve_large(self):
        model = UNetaDModel.from_pretrained("fusing/ncsnpp-ffhq-ve-dummy-update")
        model.to(torch_device)

        batch_size = 4
        num_channels = 3
        sizes = (32, 32)

        noise = torch.ones((batch_size, num_channels) + sizes).to(torch_device)
        time_step = torch.tensor(batch_size * [1e-4]).to(torch_device)

        with torch.no_grad():
            output = model(noise, time_step).sample

        output_slice = output[0, -3:, -3:, -1].flatten().cpu()
        # fmt: off
        expected_output_slice = torch.tensor([-0.0325, -0.0900, -0.0869, -0.0332, -0.0725, -0.0270, -0.0101, 0.0227, 0.0256])
        # fmt: on
        self.assertTrue(torch_all_close(output_slice, expected_output_slice, rtol=1e-2))

    def test_forward_with_norm_groups(self):
        # not required for this model
        pass
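# Minimal end-to-end sketch of the model under test (assumes the dummy checkpoint used
# above is reachable). A UNet2DModel maps a noisy sample plus a timestep to a prediction
# of the same shape:
#
#   model = UNetaDModel.from_pretrained("fusing/unet-ldm-dummy-update")
#   noise = torch.randn(1, model.config.in_channels, model.config.sample_size, model.config.sample_size)
#   out = model(noise, torch.tensor([10])).sample
#   assert out.shape == noise.shape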
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class DisjunctiveConstraintTest(unittest.TestCase):
    def test_input_types(self) -> None:
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self) -> None:
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self) -> None:
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self) -> None:
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
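# Minimal usage sketch (token ids are illustrative; in practice they come from a tokenizer).
# A DisjunctiveConstraint is fulfilled as soon as ANY of its candidate sequences has been
# generated; update() consumes one generated token at a time, exactly as exercised above:
#
#   dc = DisjunctiveConstraint([[1, 2, 3], [1, 2, 4]])
#   dc.update(1); dc.update(2)
#   stepped, completed, reset = dc.update(4)
#   assert completed and dc.current_seq == [1, 2, 4]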
"""simple docstring"""
import copy
import os
import tempfile
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pyarrow as pa
import pyarrow.parquet as pq
import pytest
from datasets.arrow_writer import ArrowWriter, OptimizedTypedSequence, ParquetWriter, TypedSequence
from datasets.features import Array2D, ClassLabel, Features, Image, Value
from datasets.features.features import Array2DExtensionType, cast_to_python_objects
from datasets.keyhash import DuplicatedKeysError, InvalidKeyError
from .utils import require_pil
class TypedSequenceTest(TestCase):
    def test_no_type(self):
        arr = pa.array(TypedSequence([1, 2, 3]))
        self.assertEqual(arr.type, pa.int64())

    def test_array_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3]), type=pa.int64())

    def test_try_type_and_type_forbidden(self):
        with self.assertRaises(ValueError):
            _ = pa.array(TypedSequence([1, 2, 3], try_type=Value("bool"), type=Value("int64")))

    def test_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_incompatible_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Value("int64")))

    def test_try_compatible_type(self):
        arr = pa.array(TypedSequence([1, 2, 3], try_type=Value("int32")))
        self.assertEqual(arr.type, pa.int32())

    def test_try_incompatible_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Value("int64")))
        self.assertEqual(arr.type, pa.string())

    def test_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_incompatible_extension_type(self):
        with self.assertRaises((TypeError, pa.lib.ArrowInvalid)):
            _ = pa.array(TypedSequence(["foo", "bar"], type=Array2D((1, 3), "int64")))

    def test_try_compatible_extension_type(self):
        arr = pa.array(TypedSequence([[[1, 2, 3]]], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, Array2DExtensionType((1, 3), "int64"))

    def test_try_incompatible_extension_type(self):
        arr = pa.array(TypedSequence(["foo", "bar"], try_type=Array2D((1, 3), "int64")))
        self.assertEqual(arr.type, pa.string())

    @require_pil
    def test_exhaustive_cast(self):
        import PIL.Image

        pil_image = PIL.Image.fromarray(np.arange(10, dtype=np.uint8).reshape(2, 5))
        with patch(
            "datasets.arrow_writer.cast_to_python_objects", side_effect=cast_to_python_objects
        ) as mock_cast_to_python_objects:
            _ = pa.array(TypedSequence([{"path": None, "bytes": b"image_bytes"}, pil_image], type=Image()))
            args, kwargs = mock_cast_to_python_objects.call_args_list[-1]
            self.assertIn("optimize_list_casting", kwargs)
            self.assertFalse(kwargs["optimize_list_casting"])
def _check_output(output, expected_num_chunks):
    stream = pa.BufferReader(output) if isinstance(output, pa.Buffer) else pa.memory_map(output)
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    assert len(pa_table.to_batches()) == expected_num_chunks
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}
    del pa_table
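# Round-trip sketch of the pattern exercised by the tests below (every call shown here
# also appears in this file): rows written through an ArrowWriter into an in-memory
# buffer can be read back as a pyarrow Table.
#
#   output = pa.BufferOutputStream()
#   with ArrowWriter(stream=output) as writer:
#       writer.write({"col_1": "foo", "col_2": 1})
#       writer.write({"col_1": "bar", "col_2": 2})
#       num_examples, num_bytes = writer.finalize()
#   table = pa.ipc.open_stream(pa.BufferReader(output.getvalue())).read_all()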
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_with_features():
    output = pa.BufferOutputStream()
    features = Features({"labels": ClassLabel(names=["neg", "pos"])})
    with ArrowWriter(stream=output, features=features) as writer:
        writer.write({"labels": 0})
        writer.write({"labels": 1})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert writer._schema == features.arrow_schema
    assert writer._schema.metadata == features.arrow_schema.metadata
    stream = pa.BufferReader(output.getvalue())
    f = pa.ipc.open_stream(stream)
    pa_table: pa.Table = f.read_all()
    schema = pa_table.schema
    assert pa_table.num_rows == 2
    assert schema == features.arrow_schema
    assert schema.metadata == features.arrow_schema.metadata
    assert features == Features.from_arrow_schema(schema)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
def test_key_datatype(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(InvalidKeyError):
            writer.write({"col_1": "foo", "col_2": 1}, key=[1, 2])
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_duplicate_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        with pytest.raises(DuplicatedKeysError):
            writer.write({"col_1": "foo", "col_2": 1}, key=10)
            writer.write({"col_1": "bar", "col_2": 2}, key=10)
        num_examples, num_bytes = writer.finalize()


@pytest.mark.parametrize("writer_batch_size", [None, 2, 10])
def test_write_with_keys(writer_batch_size):
    output = pa.BufferOutputStream()
    with ArrowWriter(
        stream=output, writer_batch_size=writer_batch_size, hash_salt="split_name", check_duplicates=True,
    ) as writer:
        writer.write({"col_1": "foo", "col_2": 1}, key=1)
        writer.write({"col_1": "bar", "col_2": 2}, key=2)
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)
@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_batch(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
        writer.write_batch({"col_1": [], "col_2": []})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_table(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_table(pa.Table.from_pydict({"col_1": ["foo", "bar"], "col_2": [1, 2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


@pytest.mark.parametrize("writer_batch_size", [None, 1, 10])
@pytest.mark.parametrize(
    "fields", [None, {"col_1": pa.string(), "col_2": pa.int64()}, {"col_1": pa.string(), "col_2": pa.int32()}]
)
def test_write_row(fields, writer_batch_size):
    output = pa.BufferOutputStream()
    schema = pa.schema(fields) if fields else None
    with ArrowWriter(stream=output, schema=schema, writer_batch_size=writer_batch_size) as writer:
        writer.write_row(pa.Table.from_pydict({"col_1": ["foo"], "col_2": [1]}))
        writer.write_row(pa.Table.from_pydict({"col_1": ["bar"], "col_2": [2]}))
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    if not fields:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
    assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
    _check_output(output.getvalue(), expected_num_chunks=num_examples if writer_batch_size == 1 else 1)


def test_write_file():
    with tempfile.TemporaryDirectory() as tmp_dir:
        fields = {"col_1": pa.string(), "col_2": pa.int64()}
        output = os.path.join(tmp_dir, "test.arrow")
        with ArrowWriter(path=output, schema=pa.schema(fields)) as writer:
            writer.write_batch({"col_1": ["foo", "bar"], "col_2": [1, 2]})
            num_examples, num_bytes = writer.finalize()
        assert num_examples == 2
        assert num_bytes > 0
        assert writer._schema == pa.schema(fields, metadata=writer._schema.metadata)
        _check_output(output, 1)
def get_base_dtype(arr_type):
    if pa.types.is_list(arr_type):
        return get_base_dtype(arr_type.value_type)
    else:
        return arr_type


def change_first_primitive_element_in_list(lst, value):
    if isinstance(lst[0], list):
        change_first_primitive_element_in_list(lst[0], value)
    else:
        lst[0] = value


@pytest.mark.parametrize("optimized_int_type, expected_dtype", [(None, pa.int64()), (Value("int32"), pa.int32())])
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_int_type_for_typed_sequence(sequence, optimized_int_type, expected_dtype):
    arr = pa.array(TypedSequence(sequence, optimized_int_type=optimized_int_type))
    assert get_base_dtype(arr.type) == expected_dtype


@pytest.mark.parametrize(
    "col, expected_dtype",
    [
        ("attention_mask", pa.int8()),
        ("special_tokens_mask", pa.int8()),
        ("token_type_ids", pa.int8()),
        ("input_ids", pa.int32()),
        ("other", pa.int64()),
    ],
)
@pytest.mark.parametrize("sequence", [[1, 2, 3], [[1, 2, 3]], [[[1, 2, 3]]]])
def test_optimized_typed_sequence(sequence, col, expected_dtype):
    # in range
    arr = pa.array(OptimizedTypedSequence(sequence, col=col))
    assert get_base_dtype(arr.type) == expected_dtype

    # not in range
    if col != "other":
        # avoids errors due to in-place modifications
        sequence = copy.deepcopy(sequence)
        value = np.iinfo(expected_dtype.to_pandas_dtype()).max + 1
        change_first_primitive_element_in_list(sequence, value)
        arr = pa.array(OptimizedTypedSequence(sequence, col=col))
        assert get_base_dtype(arr.type) == pa.int64()
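# Intent of the parametrization above: OptimizedTypedSequence stores columns known to hold
# tiny integers (e.g. "attention_mask") as int8 and silently falls back to int64 once a
# value falls out of range. A minimal check of the in-range case:
#
#   arr = pa.array(OptimizedTypedSequence([1, 0, 1], col="attention_mask"))
#   assert get_base_dtype(arr.type) == pa.int8()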
@pytest.mark.parametrize("raise_exception", [False, True])
def test_arrow_writer_closes_stream(raise_exception, tmp_path):
    path = str(tmp_path / "dataset-train.arrow")
    try:
        with ArrowWriter(path=path) as writer:
            if raise_exception:
                raise pa.lib.ArrowInvalid()
            else:
                writer.stream.close()
    except pa.lib.ArrowInvalid:
        pass
    finally:
        assert writer.stream.closed


def test_arrow_writer_with_filesystem(mockfs):
    path = "mock://dataset-train.arrow"
    with ArrowWriter(path=path, storage_options=mockfs.storage_options) as writer:
        assert isinstance(writer._fs, type(mockfs))
        assert writer._fs.storage_options == mockfs.storage_options
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    assert mockfs.exists(path)


def test_parquet_writer_write():
    output = pa.BufferOutputStream()
    with ParquetWriter(stream=output) as writer:
        writer.write({"col_1": "foo", "col_2": 1})
        writer.write({"col_1": "bar", "col_2": 2})
        num_examples, num_bytes = writer.finalize()
    assert num_examples == 2
    assert num_bytes > 0
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    assert pa_table.to_pydict() == {"col_1": ["foo", "bar"], "col_2": [1, 2]}


@require_pil
@pytest.mark.parametrize("embed_local_files", [False, True])
def test_writer_embed_local_files(tmp_path, embed_local_files):
    import PIL.Image

    image_path = str(tmp_path / "test_image_rgb.jpg")
    PIL.Image.fromarray(np.zeros((5, 5), dtype=np.uint8)).save(image_path, format="png")
    output = pa.BufferOutputStream()
    with ParquetWriter(
        stream=output, features=Features({"image": Image()}), embed_local_files=embed_local_files
    ) as writer:
        writer.write({"image": image_path})
        writer.finalize()
    stream = pa.BufferReader(output.getvalue())
    pa_table: pa.Table = pq.read_table(stream)
    out = pa_table.to_pydict()
    if embed_local_files:
        assert isinstance(out["image"][0]["path"], str)
        with open(image_path, "rb") as f:
            assert out["image"][0]["bytes"] == f.read()
    else:
        assert out["image"][0]["path"] == image_path
        assert out["image"][0]["bytes"] is None


def test_always_nullable():
    non_nullable_schema = pa.schema([pa.field("col_1", pa.string(), nullable=False)])

    output = pa.BufferOutputStream()
    with ArrowWriter(stream=output) as writer:
        writer._build_writer(inferred_schema=non_nullable_schema)

    assert writer._schema == pa.schema([pa.field("col_1", pa.string())])
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_validation_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
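# Minimal usage sketch of the API under test (the constants are the ones defined above):
#
#   readme = ReadMe.from_string(README_CORRECT, example_yaml_structure)
#   readme.validate()   # raises ValueError listing every issue if the card is malformed
#   structure = readme.to_dict()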
"""simple docstring"""
import cmath
import math
def apparent_power(voltage: float, current: float, voltage_angle: float, current_angle: float) -> complex:
    """Return the complex apparent power S = V * I for polar inputs.

    Magnitudes are in volts and amperes; angles are in degrees. Note that the
    common power-engineering convention S = V * conj(I) would negate current_angle.
    """
    # Convert angles from degrees to radians
    voltage_angle = math.radians(voltage_angle)
    current_angle = math.radians(current_angle)

    # Convert voltage and current from polar to rectangular (complex) form
    voltage_rect = cmath.rect(voltage, voltage_angle)
    current_rect = cmath.rect(current, current_angle)

    # Calculate apparent power
    return voltage_rect * current_rect
if __name__ == "__main__":
import doctest
doctest.testmod()
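    # Illustrative call (values chosen arbitrarily): 100 V at 30 degrees times 5 A at
    # -30 degrees gives roughly (500+0j) volt-amperes.
    print(apparent_power(100, 5, 30, -30))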
'''simple docstring'''
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    def __init__(
        self,
        parent,
        batch_size=13,
        seq_length=7,
        is_training=True,
        use_input_mask=True,
        use_token_type_ids=True,
        use_labels=True,
        vocab_size=99,
        hidden_size=32,
        num_hidden_layers=2,
        num_attention_heads=4,
        intermediate_size=37,
        hidden_act='gelu',
        hidden_dropout_prob=0.1,
        attention_probs_dropout_prob=0.1,
        max_position_embeddings=512,
        type_vocab_size=16,
        type_sequence_label_size=2,
        initializer_range=0.02,
        num_labels=3,
        num_choices=4,
        scope=None,
    ) -> None:
        self.parent = parent
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = 'gelu'
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size, hidden_size=self.hidden_size, num_hidden_layers=self.num_hidden_layers, num_attention_heads=self.num_attention_heads, intermediate_size=self.intermediate_size, hidden_act=self.hidden_act, hidden_dropout_prob=self.hidden_dropout_prob, attention_probs_dropout_prob=self.attention_probs_dropout_prob, max_position_embeddings=self.max_position_embeddings, type_vocab_size=self.type_vocab_size, initializer_range=self.initializer_range, return_dict=True, )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertModel(config=config )
        inputs = {'input_ids': input_ids, 'attention_mask': input_mask, 'token_type_ids': token_type_ids}
        result = model(inputs )
        inputs = [input_ids, input_mask]
        result = model(inputs )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForMaskedLM(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config )
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask , 1 ) , (1, self.num_choices, 1) )
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids , 1 ) , (1, self.num_choices, 1) )
        inputs = {
            'input_ids': multiple_choice_inputs_ids,
            'attention_mask': multiple_choice_input_mask,
            'token_type_ids': multiple_choice_token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = TFConvBertForQuestionAnswering(config=config )
        inputs = {
            'input_ids': input_ids,
            'attention_mask': input_mask,
            'token_type_ids': token_type_ids,
        }
        result = model(inputs )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest( TFModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
    all_model_classes = (
(
TFConvBertModel,
TFConvBertForMaskedLM,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertForMultipleChoice,
)
if is_tf_available()
else ()
)
    pipeline_model_mapping = (
{
"""feature-extraction""": TFConvBertModel,
"""fill-mask""": TFConvBertForMaskedLM,
"""question-answering""": TFConvBertForQuestionAnswering,
"""text-classification""": TFConvBertForSequenceClassification,
"""token-classification""": TFConvBertForTokenClassification,
"""zero-shot""": TFConvBertForSequenceClassification,
}
if is_tf_available()
else {}
)
    test_pruning = False
    test_headmasking = False
    test_onnx = False
    def setUp( self ):
        self.model_tester = TFConvBertModelTester(self )
        self.config_tester = ConfigTester(self , config_class=ConvBertConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
@slow
    def test_saved_model_creation_extended( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True
        if hasattr(config , 'use_cache' ):
            config.use_cache = True
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict , model_class )
            model = model_class(config )
            num_out = len(model(class_inputs_dict ) )
            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname , saved_model=True )
                saved_model_dir = os.path.join(tmpdirname , 'saved_model' , '1' )
                model = tf.keras.models.load_model(saved_model_dir )
                outputs = model(class_inputs_dict )
                if self.is_encoder_decoder:
                    output_hidden_states = outputs['encoder_hidden_states']
                    output_attentions = outputs['encoder_attentions']
                else:
                    output_hidden_states = outputs['hidden_states']
                    output_attentions = outputs['attentions']
                self.assertEqual(len(outputs ) , num_out )
                expected_num_layers = getattr(
                    self.model_tester , 'expected_num_hidden_layers' , self.model_tester.num_hidden_layers + 1 )
                self.assertEqual(len(output_hidden_states ) , expected_num_layers )
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:] ) , [self.model_tester.seq_length, self.model_tester.hidden_size] , )
                self.assertEqual(len(output_attentions ) , self.model_tester.num_hidden_layers )
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
@slow
    def test_model_from_pretrained( self ):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        self.assertIsNotNone(model )
    def test_attention_outputs( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester , 'decoder_seq_length' , self.model_tester.seq_length )
        encoder_seq_length = getattr(self.model_tester , 'encoder_seq_length' , self.model_tester.seq_length )
        decoder_key_length = getattr(self.model_tester , 'key_length' , decoder_seq_length )
        encoder_key_length = getattr(self.model_tester , 'key_length' , encoder_seq_length )
        def check_decoder_attentions_output(outputs ):
            out_len = len(outputs )
            self.assertEqual(out_len % 2 , 0 )
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length] , )
        def check_encoder_attentions_output(outputs ):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions ) , self.model_tester.num_hidden_layers )
            self.assertListEqual(
                list(attentions[0].shape[-3:] ) , [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length] , )
        for model_class in self.all_model_classes:
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = False
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            out_len = len(outputs )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            if self.is_encoder_decoder:
                model = model_class(config )
                outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
                self.assertEqual(config.output_hidden_states , False )
                check_decoder_attentions_output(outputs )
            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(config.output_hidden_states , False )
            check_encoder_attentions_output(outputs )
            # Check attention is always last and order is fine
            inputs_dict['output_attentions'] = True
            config.output_hidden_states = True
            model = model_class(config )
            outputs = model(self._prepare_for_class(inputs_dict , model_class ) )
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1) , len(outputs ) )
            self.assertEqual(model.config.output_hidden_states , True )
            check_encoder_attentions_output(outputs )
@require_tf
class TFConvBertModelIntegrationTest( unittest.TestCase ):
'''simple docstring'''
@slow
    def test_inference_masked_lm( self ):
        model = TFConvBertModel.from_pretrained('YituTech/conv-bert-base' )
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]] )
        output = model(input_ids )[0]
        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape , expected_shape )
        expected_slice = tf.constant(
            [
                [
                    [-0.0347_5493, -0.468_6034, -0.3063_8832],
                    [0.2263_7248, -0.2698_8646, -0.742_3424],
                    [0.1032_4868, -0.4501_3508, -0.5828_0784],
                ]
            ] )
        tf.debugging.assert_near(output[:, :3, :3] , expected_slice , atol=1E-4 )
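# Illustrative sketch (an addition, not part of the original test file):
# ids_tensor/random_attention_mask used above come from the shared TF test
# utilities; a minimal version of such a random-id helper could look like
# this. The shapes and seed are assumptions for illustration.
import tensorflow as tf

def random_ids(shape, vocab_size, seed=0):
    # Uniform random token ids in [0, vocab_size), similar in spirit to ids_tensor.
    generator = tf.random.Generator.from_seed(seed)
    return generator.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)

example_ids = random_ids((13, 7), vocab_size=99)
print(example_ids.shape)  # (13, 7)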
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
REALM_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'''google/realm-cc-news-pretrained-embedder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-embedder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-encoder''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-encoder/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-scorer''': (
'''https://huggingface.co/google/realm-cc-news-pretrained-scorer/resolve/main/config.json'''
),
'''google/realm-cc-news-pretrained-openqa''': (
        '''https://huggingface.co/google/realm-cc-news-pretrained-openqa/resolve/main/config.json'''
),
'''google/realm-orqa-nq-openqa''': '''https://huggingface.co/google/realm-orqa-nq-openqa/resolve/main/config.json''',
'''google/realm-orqa-nq-reader''': '''https://huggingface.co/google/realm-orqa-nq-reader/resolve/main/config.json''',
'''google/realm-orqa-wq-openqa''': '''https://huggingface.co/google/realm-orqa-wq-openqa/resolve/main/config.json''',
'''google/realm-orqa-wq-reader''': '''https://huggingface.co/google/realm-orqa-wq-reader/resolve/main/config.json''',
# See all REALM models at https://huggingface.co/models?filter=realm
}
class RealmConfig( PretrainedConfig ):
    model_type = 'realm'
    def __init__( self,vocab_size=3_0522,hidden_size=768,retriever_proj_size=128,num_hidden_layers=12,num_attention_heads=12,num_candidates=8,intermediate_size=3072,hidden_act="gelu_new",hidden_dropout_prob=0.1,attention_probs_dropout_prob=0.1,max_position_embeddings=512,type_vocab_size=2,initializer_range=0.02,layer_norm_eps=1E-12,span_hidden_size=256,max_span_width=10,reader_layer_norm_eps=1E-3,reader_beam_size=5,reader_seq_len=320,num_block_records=1335_3718,searcher_beam_size=5000,pad_token_id=1,bos_token_id=0,eos_token_id=2,**kwargs,):
        """simple docstring"""
        super().__init__(pad_token_id=pad_token_id,bos_token_id=bos_token_id,eos_token_id=eos_token_id,**kwargs )
        # Common config
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.retriever_proj_size = retriever_proj_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_candidates = num_candidates
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.initializer_range = initializer_range
        self.type_vocab_size = type_vocab_size
        self.layer_norm_eps = layer_norm_eps
        # Reader config
        self.span_hidden_size = span_hidden_size
        self.max_span_width = max_span_width
        self.reader_layer_norm_eps = reader_layer_norm_eps
        self.reader_beam_size = reader_beam_size
        self.reader_seq_len = reader_seq_len
        # Retrieval config
        self.num_block_records = num_block_records
        self.searcher_beam_size = searcher_beam_size
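# Illustrative usage sketch (an addition, not part of the original module):
# instantiating the config above and building a REALM model from it.
# RealmEmbedder is assumed to be available in the installed transformers version.
from transformers import RealmConfig, RealmEmbedder

config = RealmConfig(retriever_proj_size=128, num_candidates=8)
model = RealmEmbedder(config)
print(model.config.hidden_size)  # 768 by default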
| 18 |
'''simple docstring'''
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest( TestCase ):
'''simple docstring'''
    def _create_dummy_dataset( self ) -> Dataset:
        dset = Dataset.from_dict({'filename': ['my_name-train' + '_' + str(x ) for x in np.arange(30 ).tolist()]} )
        return dset
    def test_add_faiss_index( self ):
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex , i : {"vecs": i * np.ones(5 , dtype=np.float32 )} , with_indices=True , keep_in_memory=True )
        dset = dset.add_faiss_index('vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
        dset.drop_index('vecs' )
    def test_add_faiss_index_from_external_arrays( self ):
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , batch_size=100 , metric_type=faiss.METRIC_INNER_PRODUCT , )
        scores, examples = dset.get_nearest_examples('vecs' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_serialization( self ):
        import faiss
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' , metric_type=faiss.METRIC_INNER_PRODUCT , )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            dset.save_faiss_index('vecs' , tmp_file.name )
            dset.load_faiss_index('vecs2' , tmp_file.name )
        os.unlink(tmp_file.name )
        scores, examples = dset.get_nearest_examples('vecs2' , np.ones(5 , dtype=np.float32 ) )
        self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
    def test_drop_index( self ):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5) ) * np.arange(30 ).reshape(-1 , 1 ) , index_name='vecs' )
        dset.drop_index('vecs' )
        self.assertRaises(MissingIndex , partial(dset.get_nearest_examples , 'vecs2' , np.ones(5 , dtype=np.float32 ) ) )
    def test_add_elasticsearch_index( self ):
        from elasticsearch import Elasticsearch
        dset: Dataset = self._create_dummy_dataset()
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            mocked_index_create.return_value = {'acknowledged': True}
            mocked_bulk.return_value = [(True, None)] * 30
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index('filename' , es_client=es_client )
            scores, examples = dset.get_nearest_examples('filename' , 'my_name-train_29' )
            self.assertEqual(examples['filename'][0] , 'my_name-train_29' )
@require_faiss
class FaissIndexTest( TestCase ):
'''simple docstring'''
    def test_flat_ip( self ):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        # add vectors
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsNotNone(index.faiss_index )
        self.assertEqual(index.faiss_index.ntotal , 5 )
        index.add_vectors(np.zeros((5, 5) , dtype=np.float32 ) )
        self.assertEqual(index.faiss_index.ntotal , 10 )
        # single query
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertRaises(ValueError , index.search , query.reshape(-1 , 1 ) )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
        # batched queries
        queries = np.eye(5 , dtype=np.float32 )[::-1]
        total_scores, total_indices = index.search_batch(queries )
        self.assertRaises(ValueError , index.search_batch , queries[0] )
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores ) , 0 )
        self.assertListEqual([4, 3, 2, 1, 0] , best_indices )
    def test_factory( self ):
        import faiss
        index = FaissIndex(string_factory='Flat' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
        index = FaissIndex(string_factory='LSH' )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexLSH )
        with self.assertRaises(ValueError ):
            index = FaissIndex(string_factory='Flat' , custom_index=faiss.IndexFlat(5 ) )
    def test_custom( self ):
        import faiss
        custom_index = faiss.IndexFlat(5 )
        index = FaissIndex(custom_index=custom_index )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        self.assertIsInstance(index.faiss_index , faiss.IndexFlat )
    def test_serialization( self ):
        import faiss
        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
        index.add_vectors(np.eye(5 , dtype=np.float32 ) )
        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False ) as tmp_file:
            index.save(tmp_file.name )
            index = FaissIndex.load(tmp_file.name )
        os.unlink(tmp_file.name )
        query = np.zeros(5 , dtype=np.float32 )
        query[1] = 1
        scores, indices = index.search(query )
        self.assertGreater(scores[0] , 0 )
        self.assertEqual(indices[0] , 1 )
@require_faiss
def test_serialization_fs( mockfs ):
    """simple docstring"""
    import faiss
    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT )
    index.add_vectors(np.eye(5 , dtype=np.float32 ) )
    index_name = 'index.faiss'
    path = f'mock://{index_name}'
    index.save(path , storage_options=mockfs.storage_options )
    index = FaissIndex.load(path , storage_options=mockfs.storage_options )
    query = np.zeros(5 , dtype=np.float32 )
    query[1] = 1
    scores, indices = index.search(query )
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTest( TestCase ):
'''simple docstring'''
    def test_elasticsearch( self ):
        from elasticsearch import Elasticsearch
        with patch('elasticsearch.Elasticsearch.search' ) as mocked_search, patch(
            'elasticsearch.client.IndicesClient.create' ) as mocked_index_create, patch('elasticsearch.helpers.streaming_bulk' ) as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {'acknowledged': True}
            index = ElasticSearchIndex(es_client=es_client )
            mocked_bulk.return_value = [(True, None)] * 3
            index.add_documents(['foo', 'bar', 'foobar'] )
            # single query
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # single query with timeout
            query = 'foo'
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 0}]}}
            scores, indices = index.search(query , request_timeout=30 )
            self.assertEqual(scores[0] , 1 )
            self.assertEqual(indices[0] , 0 )
            # batched queries
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
            # batched queries with timeout
            queries = ['foo', 'bar', 'foobar']
            mocked_search.return_value = {'hits': {'hits': [{'_score': 1, '_id': 1}]}}
            total_scores, total_indices = index.search_batch(queries , request_timeout=30 )
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores ) , 0 )
            self.assertListEqual([1, 1, 1] , best_indices )
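# Illustrative sketch (an addition, not part of the original tests): the
# inner-product search exercised above, shown directly against faiss and a
# brute-force numpy computation for comparison.
import numpy as np
import faiss

vectors = np.eye(5, dtype=np.float32)  # five orthonormal vectors
index = faiss.IndexFlatIP(5)           # flat index with inner-product metric
index.add(vectors)

query = np.zeros((1, 5), dtype=np.float32)
query[0, 1] = 1.0
scores, indices = index.search(query, 1)
assert indices[0, 0] == 1  # the matching basis vector wins

# The same result by hand: inner products against every stored vector.
assert int(np.argmax(vectors @ query[0])) == 1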
def binomial_coefficient( n , r ):
    c = [0 for i in range(r + 1 )]
    # nc0 = 1
    c[0] = 1
    for i in range(1 , n + 1 ):
        # to compute current row from previous row.
        j = min(i , r )
        while j > 0:
            c[j] += c[j - 1]
            j -= 1
    return c[r]
print(binomial_coefficient(n=1_0, r=5))
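# Quick cross-check (an addition, not part of the original file): the
# rolling-row computation above against math.comb from the standard library
# (Python 3.8+), assuming binomial_coefficient from above is in scope.
from math import comb

for n in range(12):
    for r in range(n + 1):
        assert binomial_coefficient(n, r) == comb(n, r)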
| 19 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def count_trainable_parameters( model ):
    """simple docstring"""
    model_parameters = filter(lambda p : p.requires_grad , model.parameters() )
    params = sum([np.prod(p.size() ) for p in model_parameters] )
    return params
logger = logging.getLogger(__name__)
def get_checkpoint_callback( output_dir , metric ):
    """simple docstring"""
    if metric == "rouge2":
        exp = '{val_avg_rouge2:.4f}-{step_count}'
    elif metric == "bleu":
        exp = '{val_avg_bleu:.4f}-{step_count}'
    elif metric == "em":
        exp = '{val_avg_em:.4f}-{step_count}'
    elif metric == "loss":
        exp = '{val_avg_loss:.4f}-{step_count}'
    else:
        raise NotImplementedError(
            f'seq2seq callbacks only support rouge2 and bleu, got {metric}, You can make your own by adding to this'
            ' function.' )
    checkpoint_callback = ModelCheckpoint(
        dirpath=output_dir , filename=exp , monitor=f'val_{metric}' , mode='max' , save_top_k=1 , every_n_epochs=1 , )
    return checkpoint_callback
def get_early_stopping_callback( metric , patience ):
    """simple docstring"""
    return EarlyStopping(
        monitor=f'val_{metric}' , mode='min' if 'loss' in metric else 'max' , patience=patience , verbose=True , )
class Seq2SeqLoggingCallback( pl.Callback ):
'''simple docstring'''
    def on_batch_end( self , trainer , pl_module ):
        lrs = {F'lr_group_{i}': param['lr'] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
        pl_module.logger.log_metrics(lrs )
    @rank_zero_only
    def _write_logs( self , trainer: pl.Trainer , pl_module: pl.LightningModule , type_path: str , save_generations=True ) -> None:
        logger.info(F'***** {type_path} results at step {trainer.global_step:05d} *****' )
        metrics = trainer.callback_metrics
        trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ['log', 'progress_bar', 'preds']} )
        # Log results
        od = Path(pl_module.hparams.output_dir )
        if type_path == "test":
            results_file = od / 'test_results.txt'
            generations_file = od / 'test_generations.txt'
        else:
            # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
            # If people want this it will be easy enough to add back.
            results_file = od / F'{type_path}_results/{trainer.global_step:05d}.txt'
            generations_file = od / F'{type_path}_generations/{trainer.global_step:05d}.txt'
            results_file.parent.mkdir(exist_ok=True )
            generations_file.parent.mkdir(exist_ok=True )
        with open(results_file , 'a+' ) as writer:
            for key in sorted(metrics ):
                if key in ["log", "progress_bar", "preds"]:
                    continue
                val = metrics[key]
                if isinstance(val , torch.Tensor ):
                    val = val.item()
                msg = F'{key}: {val:.6f}\n'
                writer.write(msg )
        if not save_generations:
            return
        if "preds" in metrics:
            content = '\n'.join(metrics['preds'] )
            generations_file.open('w+' ).write(content )
    @rank_zero_only
    def on_train_start( self , trainer , pl_module ):
        try:
            npars = pl_module.model.model.num_parameters()
        except AttributeError:
            npars = pl_module.model.num_parameters()
        n_trainable_pars = count_trainable_parameters(pl_module )
        # mp stands for million parameters
        trainer.logger.log_metrics({'n_params': npars, 'mp': npars / 1E6, 'grad_mp': n_trainable_pars / 1E6} )
    @rank_zero_only
    def on_test_end( self , trainer: pl.Trainer , pl_module: pl.LightningModule ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        return self._write_logs(trainer , pl_module , 'test' )
    @rank_zero_only
    def on_validation_end( self , trainer: pl.Trainer , pl_module ):
        save_json(pl_module.metrics , pl_module.metrics_save_path )
        # Uncommenting this will save val generations
        # return self._write_logs(trainer, pl_module, "valid")
import json
import os
import tempfile
import datasets
from utils import generate_example_dataset, get_duration
SPEED_TEST_N_EXAMPLES = 50000
SMALL_TEST = 5000
RESULTS_BASEPATH, RESULTS_FILENAME = os.path.split(__file__)
RESULTS_FILE_PATH = os.path.join(RESULTS_BASEPATH, """results""", RESULTS_FILENAME.replace(""".py""", """.json"""))
@get_duration
def read(dataset , length ):
    for i in range(length ):
        _ = dataset[i]
@get_duration
def read_batch(dataset , length , batch_size ):
    for i in range(0 , len(dataset ) , batch_size ):
        _ = dataset[i : i + batch_size]
@get_duration
def read_formatted(dataset , length , type ):
    with dataset.formatted_as(type=type ):
        for i in range(length ):
            _ = dataset[i]
@get_duration
def read_formatted_batch(dataset , length , batch_size , type ):
    with dataset.formatted_as(type=type ):
        for i in range(0 , length , batch_size ):
            _ = dataset[i : i + batch_size]
def benchmark_iterating():
    times = {"""num examples""": SPEED_TEST_N_EXAMPLES}
    functions = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """pandas""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """torch""", """length""": SMALL_TEST}),
(read_formatted, {"""type""": """tensorflow""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}),
]
    functions_shuffled = [
(read, {"""length""": SMALL_TEST}),
(read, {"""length""": SPEED_TEST_N_EXAMPLES}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 10}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 100}),
(read_batch, {"""length""": SPEED_TEST_N_EXAMPLES, """batch_size""": 1_000}),
(read_formatted, {"""type""": """numpy""", """length""": SMALL_TEST}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 10}),
(read_formatted_batch, {"""type""": """numpy""", """length""": SMALL_TEST, """batch_size""": 1_000}),
]
with tempfile.TemporaryDirectory() as tmp_dir:
print("""generating dataset""" )
        features = datasets.Features(
            {"""list""": datasets.Sequence(datasets.Value("""float32""" ) ), """numbers""": datasets.Value("""float32""" )} )
        dataset = generate_example_dataset(
            os.path.join(tmp_dir , """dataset.arrow""" ) , features , num_examples=SPEED_TEST_N_EXAMPLES , seq_shapes={"""list""": (100,)} , )
print("""first set of iterations""" )
for func, kwargs in functions:
            print(func.__name__ , str(kwargs ) )
            times[func.__name__ + " " + " ".join(str(v ) for v in kwargs.values() )] = func(dataset , **kwargs )
print("""shuffling dataset""" )
        dataset = dataset.shuffle()
print("""Second set of iterations (after shuffling""" )
for func, kwargs in functions_shuffled:
print("""shuffled """ , func.__name__ , str(SCREAMING_SNAKE_CASE__ ) )
lowercase : Dict = func(
SCREAMING_SNAKE_CASE__ , **SCREAMING_SNAKE_CASE__ )
with open(SCREAMING_SNAKE_CASE__ , """wb""" ) as f:
f.write(json.dumps(SCREAMING_SNAKE_CASE__ ).encode("""utf-8""" ) )
if __name__ == "__main__": # useful to run the profiler
benchmark_iterating()
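# get_duration above is imported from a local utils module that is not shown
# here; below is a minimal sketch (an addition, not the real helper) of what
# such a timing decorator could look like.
import time
from functools import wraps

def get_duration_sketch(func):
    @wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        func(*args, **kwargs)
        return time.perf_counter() - start  # seconds elapsed
    return wrapper

@get_duration_sketch
def busy_loop(n):
    for _ in range(n):
        pass

print(busy_loop(1_000_000))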
| 20 |
'''simple docstring'''
def one_pence():
    """simple docstring"""
    return 1
def two_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else two_pence(x - 2 ) + one_pence()
def five_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else five_pence(x - 5 ) + two_pence(x )
def ten_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else ten_pence(x - 10 ) + five_pence(x )
def twenty_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else twenty_pence(x - 20 ) + ten_pence(x )
def fifty_pence(x: int ):
    """simple docstring"""
    return 0 if x < 0 else fifty_pence(x - 50 ) + twenty_pence(x )
def one_pound(x: int ):
    """simple docstring"""
    return 0 if x < 0 else one_pound(x - 100 ) + fifty_pence(x )
def two_pound(x: int ):
    """simple docstring"""
    return 0 if x < 0 else two_pound(x - 200 ) + one_pound(x )
def solution(x: int = 200 ):
    """simple docstring"""
    return two_pound(x )
if __name__ == "__main__":
print(solution(int(input().strip()))) | 331 | 0 |
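# Cross-check (an addition, not part of the original file): the mutual
# recursion above counts partitions of an amount into British coins; the same
# count via a standard bottom-up coin-change DP. For 200 pence both give
# 73682, the known Project Euler 31 answer.
def count_partitions(pence: int = 200) -> int:
    coins = [1, 2, 5, 10, 20, 50, 100, 200]
    ways = [1] + [0] * pence  # ways[0] = 1: the empty combination
    for coin in coins:
        for amount in range(coin, pence + 1):
            ways[amount] += ways[amount - coin]
    return ways[pence]

assert count_partitions(200) == 73682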
def circle_sort( collection ) -> list:
    if len(collection ) < 2:
        return collection
    def circle_sort_util(collection , low , high ) -> bool:
        swapped = False
        if low == high:
            return swapped
        left = low
        right = high
        while left < right:
            if collection[left] > collection[right]:
                collection[left], collection[right] = (
                    collection[right],
                    collection[left],
                )
                swapped = True
            left += 1
            right -= 1
        if left == right and collection[left] > collection[right + 1]:
            collection[left], collection[right + 1] = (
                collection[right + 1],
                collection[left],
            )
            swapped = True
        mid = low + int((high - low) / 2 )
        left_swap = circle_sort_util(collection , low , mid )
        right_swap = circle_sort_util(collection , mid + 1 , high )
        return swapped or left_swap or right_swap
    is_not_sorted = True
    while is_not_sorted is True:
        is_not_sorted = circle_sort_util(collection , 0 , len(collection ) - 1 )
    return collection
if __name__ == "__main__":
    user_input = input("Enter numbers separated by a comma:\n").strip()
    unsorted = [int(item) for item in user_input.split(",")]
print(circle_sort(unsorted))
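# Randomized cross-check (an addition, not part of the original file) of
# circle_sort above against the built-in sort.
import random

for _ in range(100):
    data = [random.randint(-50, 50) for _ in range(random.randint(0, 20))]
    assert circle_sort(list(data)) == sorted(data)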
| 21 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class _lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Dict , **_A : Any ) -> int:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : List[Any] , **_A : Any ) -> List[str]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : List[str] , *_A : Tuple , **_A : Optional[int] ) -> int:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["""flax""", """transformers"""]
def __init__( self : Union[str, Any] , *_A : Any , **_A : int ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Union[str, Any] , *_A : Optional[int] , **_A : Dict ) -> Optional[Any]:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Tuple , *_A : Any , **_A : Union[str, Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["""flax""", """transformers"""]
def __init__( self : int , *_A : Optional[int] , **_A : Any ) -> List[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : int , **_A : str ) -> Any:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Optional[Any] , *_A : Union[str, Any] , **_A : List[str] ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] )
class _lowerCamelCase ( metaclass=DummyObject ):
    '''simple docstring'''
    _backends = ["""flax""", """transformers"""]
def __init__( self : Tuple , *_A : Dict , **_A : str ) -> Optional[Any]:
requires_backends(self , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : str , *_A : Dict , **_A : Optional[Any] ) -> Dict:
requires_backends(cls , ['flax', 'transformers'] )
@classmethod
def __lowerCAmelCase ( cls : Any , *_A : List[str] , **_A : str ) -> Optional[int]:
requires_backends(cls , ['flax', 'transformers'] ) | 331 | 0 |
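# Illustrative sketch (an addition, not part of the original module): the
# DummyObject/requires_backends pair lets imports succeed and fail lazily when
# a backend is missing. This standalone metaclass mimics the idea; the names
# and message are assumptions, not the library's own implementation.
class _Dummy(type):
    """Raise on any public attribute access, mimicking DummyObject."""

    def __getattribute__(cls, key):
        if key.startswith('_'):
            return super().__getattribute__(key)
        raise ImportError(f'{cls.__name__} requires backends that are not installed.')

class FlaxOnlyThing(metaclass=_Dummy):
    pass

try:
    FlaxOnlyThing.from_pretrained  # any public attribute access fails loudly
except ImportError as err:
    print(err)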
'''simple docstring'''
from itertools import permutations
def is_substring_divisible( num : tuple ) -> bool:
'''simple docstring'''
if num[3] % 2 != 0:
return False
if (num[2] + num[3] + num[4]) % 3 != 0:
return False
if num[5] % 5 != 0:
return False
    tests = [7, 11, 13, 17]
    for i, test in enumerate(tests ):
if (num[i + 4] * 100 + num[i + 5] * 10 + num[i + 6]) % test != 0:
return False
return True
def solution( n : int = 10 ) -> int:
    '''simple docstring'''
    return sum(
        int("".join(map(str , num ) ) )
        for num in permutations(range(n ) )
        if is_substring_divisible(num ) )
if __name__ == "__main__":
print(F"{solution() = }")
| 22 |
'''simple docstring'''
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_VISION_2_SEQ_MAPPING
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_VISION_2_SEQ_MAPPING
lowerCAmelCase :Tuple = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS )
class ImageToTextPipeline( Pipeline ):
'''simple docstring'''
    def __init__( self , *args , **kwargs ):
        super().__init__(*args , **kwargs )
        requires_backends(self , 'vision' )
        self.check_model_type(
            TF_MODEL_FOR_VISION_2_SEQ_MAPPING if self.framework == 'tf' else MODEL_FOR_VISION_2_SEQ_MAPPING )
    def _sanitize_parameters( self , max_new_tokens=None , generate_kwargs=None , prompt=None ):
        forward_kwargs = {}
        preprocess_params = {}
        if prompt is not None:
            preprocess_params['prompt'] = prompt
        if generate_kwargs is not None:
            forward_kwargs['generate_kwargs'] = generate_kwargs
        if max_new_tokens is not None:
            if "generate_kwargs" not in forward_kwargs:
                forward_kwargs['generate_kwargs'] = {}
            if "max_new_tokens" in forward_kwargs["generate_kwargs"]:
                raise ValueError(
                    '\'max_new_tokens\' is defined twice, once in \'generate_kwargs\' and once as a direct parameter,'
                    ' please use only one' )
            forward_kwargs['generate_kwargs']['max_new_tokens'] = max_new_tokens
        return preprocess_params, forward_kwargs, {}
    def __call__( self , images: Union[str, List[str], "Image.Image", List["Image.Image"]] , **kwargs ):
        return super().__call__(images , **kwargs )
    def preprocess( self , image , prompt=None ):
        image = load_image(image )
        if prompt is not None:
            if not isinstance(prompt , str ):
                raise ValueError(
                    F'Received an invalid text input, got - {type(prompt )} - but expected a single string. '
                    'Note also that one single text can be provided for conditional image to text generation.' )
            model_type = self.model.config.model_type
            if model_type == "git":
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                input_ids = self.tokenizer(text=prompt , add_special_tokens=False ).input_ids
                input_ids = [self.tokenizer.cls_token_id] + input_ids
                input_ids = torch.tensor(input_ids ).unsqueeze(0 )
                model_inputs.update({'input_ids': input_ids} )
            elif model_type == "pix2struct":
                model_inputs = self.image_processor(images=image , header_text=prompt , return_tensors=self.framework )
            elif model_type != "vision-encoder-decoder":
                # vision-encoder-decoder does not support conditional generation
                model_inputs = self.image_processor(images=image , return_tensors=self.framework )
                text_inputs = self.tokenizer(prompt , return_tensors=self.framework )
                model_inputs.update(text_inputs )
            else:
                raise ValueError(F'Model type {model_type} does not support conditional text generation' )
        else:
            model_inputs = self.image_processor(images=image , return_tensors=self.framework )
        if self.model.config.model_type == "git" and prompt is None:
            model_inputs['input_ids'] = None
        return model_inputs
    def _forward( self , model_inputs , generate_kwargs=None ):
        # Git model sets `model_inputs["input_ids"] = None` in `preprocess` (when `prompt=None`). In batch model, the
        # pipeline will group them into a list of `None`, which fail `_forward`. Avoid this by checking it first.
        if (
            "input_ids" in model_inputs
            and isinstance(model_inputs['input_ids'] , list )
            and all(x is None for x in model_inputs['input_ids'] )
        ):
            model_inputs['input_ids'] = None
        if generate_kwargs is None:
            generate_kwargs = {}
        # FIXME: We need to pop here due to a difference in how `generation.py` and `generation.tf_utils.py`
        # parse inputs. In the Tensorflow version, `generate` raises an error if we don't use `input_ids` whereas
        # the PyTorch version matches it with `self.model.main_input_name` or `self.model.encoder.main_input_name`
        # in the `_prepare_model_inputs` method.
        inputs = model_inputs.pop(self.model.main_input_name )
        model_outputs = self.model.generate(inputs , **model_inputs , **generate_kwargs )
        return model_outputs
    def postprocess( self , model_outputs ):
        records = []
        for output_ids in model_outputs:
            record = {
                'generated_text': self.tokenizer.decode(
                    output_ids , skip_special_tokens=True , )
            }
            records.append(record )
        return records
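# Illustrative usage sketch (an addition, not part of the original module):
# running the pipeline implemented above. The checkpoint and image URL are
# assumptions; any image-to-text model on the Hub should work.
from transformers import pipeline

captioner = pipeline('image-to-text', model='Salesforce/blip-image-captioning-base')
outputs = captioner('http://images.cocodataset.org/val2017/000000039769.jpg')
print(outputs[0]['generated_text'])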
'''simple docstring'''
import unittest
from transformers import MraConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor, random_attention_mask
if is_torch_available():
import torch
from transformers import (
MraForMaskedLM,
MraForMultipleChoice,
MraForQuestionAnswering,
MraForSequenceClassification,
MraForTokenClassification,
MraModel,
)
from transformers.models.mra.modeling_mra import MRA_PRETRAINED_MODEL_ARCHIVE_LIST
class MraModelTester:
"""simple docstring"""
    def __init__( self , parent , batch_size=2 , seq_length=8 , is_training=True , use_input_mask=True , use_token_type_ids=True , use_labels=True , vocab_size=99 , hidden_size=16 , num_hidden_layers=5 , num_attention_heads=2 , intermediate_size=36 , hidden_act="gelu" , hidden_dropout_prob=0.0 , attention_probs_dropout_prob=0.0 , max_position_embeddings=512 , type_vocab_size=16 , type_sequence_label_size=2 , initializer_range=0.02 , num_labels=3 , num_choices=4 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.is_training = is_training
        self.use_input_mask = use_input_mask
        self.use_token_type_ids = use_token_type_ids
        self.use_labels = use_labels
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.intermediate_size = intermediate_size
        self.hidden_act = hidden_act
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.type_sequence_label_size = type_sequence_label_size
        self.initializer_range = initializer_range
        self.num_labels = num_labels
        self.num_choices = num_choices
        self.scope = scope
    def prepare_config_and_inputs( self ):
        input_ids = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length] )
        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size] , self.type_sequence_label_size )
            token_labels = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
            choice_labels = ids_tensor([self.batch_size] , self.num_choices )
        config = self.get_config()
        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def get_config( self ):
        return MraConfig(
            vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , is_decoder=False , initializer_range=self.initializer_range , )
    def get_pipeline_config( self ):
        config = self.get_config()
        config.vocab_size = 300
        return config
    def prepare_config_and_inputs_for_decoder( self ):
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = self.prepare_config_and_inputs()
        config.is_decoder = True
        encoder_hidden_states = floats_tensor([self.batch_size, self.seq_length, self.hidden_size] )
        encoder_attention_mask = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
encoder_hidden_states,
encoder_attention_mask,
)
    def create_and_check_model( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        result = model(input_ids , token_type_ids=token_type_ids )
        result = model(input_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_model_as_decoder( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels , encoder_hidden_states , encoder_attention_mask , ):
        config.add_cross_attention = True
        model = MraModel(config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , encoder_attention_mask=encoder_attention_mask , )
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , encoder_hidden_states=encoder_hidden_states , )
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids )
        self.parent.assertEqual(result.last_hidden_state.shape , (self.batch_size, self.seq_length, self.hidden_size) )
    def create_and_check_for_masked_lm( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForMaskedLM(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
    def create_and_check_for_question_answering( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        model = MraForQuestionAnswering(config=config )
        model.to(torch_device )
        model.eval()
        result = model(
            input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , start_positions=sequence_labels , end_positions=sequence_labels , )
        self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
        self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
    def create_and_check_for_sequence_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForSequenceClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=sequence_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_token_classification( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_labels = self.num_labels
        model = MraForTokenClassification(config=config )
        model.to(torch_device )
        model.eval()
        result = model(input_ids , attention_mask=input_mask , token_type_ids=token_type_ids , labels=token_labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
    def create_and_check_for_multiple_choice( self , config , input_ids , token_type_ids , input_mask , sequence_labels , token_labels , choice_labels ):
        config.num_choices = self.num_choices
        model = MraForMultipleChoice(config=config )
        model.to(torch_device )
        model.eval()
        multiple_choice_inputs_ids = input_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_token_type_ids = token_type_ids.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        multiple_choice_input_mask = input_mask.unsqueeze(1 ).expand(-1 , self.num_choices , -1 ).contiguous()
        result = model(
            multiple_choice_inputs_ids , attention_mask=multiple_choice_input_mask , token_type_ids=multiple_choice_token_type_ids , labels=choice_labels , )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_choices) )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
        return config, inputs_dict
@require_torch
class MraModelTest( ModelTesterMixin , unittest.TestCase ):
"""simple docstring"""
    all_model_classes = (
        (
            MraModel,
            MraForMaskedLM,
            MraForMultipleChoice,
            MraForQuestionAnswering,
            MraForSequenceClassification,
            MraForTokenClassification,
        )
        if is_torch_available()
        else ()
    )
    test_pruning = False
    test_headmasking = False
    test_torchscript = False
    has_attentions = False
    all_generative_model_classes = ()
    def setUp( self ):
        self.model_tester = MraModelTester(self )
        self.config_tester = ConfigTester(self , config_class=MraConfig , hidden_size=37 )
    def test_config( self ):
        self.config_tester.run_common_tests()
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_model_various_embeddings( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        for type in ["absolute", "relative_key", "relative_key_query"]:
            config_and_inputs[0].position_embedding_type = type
            self.model_tester.create_and_check_model(*config_and_inputs )
    def test_for_masked_lm( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs )
    def test_for_multiple_choice( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs )
    def test_for_question_answering( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs )
    def test_for_sequence_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs )
    def test_for_token_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in MRA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MraModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
    @unittest.skip(reason='''MRA does not output attentions''' )
    def test_attention_outputs( self ):
        return
@require_torch
class MraModelIntegrationTest( unittest.TestCase ):
"""simple docstring"""
@slow
    def test_inference_no_head( self ):
        model = MraModel.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        expected_shape = torch.Size((1, 256, 768) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[-0.01_40, 0.08_30, -0.03_81], [0.15_46, 0.14_02, 0.02_20], [0.11_62, 0.08_51, 0.01_65]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm( self ):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-512-4''' )
        input_ids = torch.arange(256 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 256, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[9.25_95, -3.60_38, 11.88_19], [9.38_69, -3.26_93, 11.09_56], [11.85_24, -3.49_38, 13.12_10]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_masked_lm_long_input( self ):
        model = MraForMaskedLM.from_pretrained('''uw-madison/mra-base-4096-8-d3''' )
        input_ids = torch.arange(4096 ).unsqueeze(0 )
        with torch.no_grad():
            output = model(input_ids )[0]
        vocab_size = 50265
        expected_shape = torch.Size((1, 4096, vocab_size) )
        self.assertEqual(output.shape , expected_shape )
        expected_slice = torch.tensor(
            [[[5.47_89, -2.35_64, 7.50_64], [7.90_67, -1.33_69, 9.96_68], [9.07_12, -1.81_06, 7.03_80]]] )
        self.assertTrue(torch.allclose(output[:, :3, :3] , expected_slice , atol=1E-4 ) )
| 23 |
'''simple docstring'''
import argparse
import logging
import os
from pathlib import Path
from typing import Any, Dict
import pytorch_lightning as pl
from pytorch_lightning.utilities import rank_zero_info
from transformers import (
AdamW,
AutoConfig,
AutoModel,
AutoModelForPreTraining,
AutoModelForQuestionAnswering,
AutoModelForSeqaSeqLM,
AutoModelForSequenceClassification,
AutoModelForTokenClassification,
AutoModelWithLMHead,
AutoTokenizer,
PretrainedConfig,
PreTrainedTokenizer,
)
from transformers.optimization import (
Adafactor,
get_cosine_schedule_with_warmup,
get_cosine_with_hard_restarts_schedule_with_warmup,
get_linear_schedule_with_warmup,
get_polynomial_decay_schedule_with_warmup,
)
from transformers.utils.versions import require_version
logger = logging.getLogger(__name__)
require_version('''pytorch_lightning>=1.0.4''')
MODEL_MODES = {
'''base''': AutoModel,
'''sequence-classification''': AutoModelForSequenceClassification,
'''question-answering''': AutoModelForQuestionAnswering,
'''pretraining''': AutoModelForPreTraining,
'''token-classification''': AutoModelForTokenClassification,
'''language-modeling''': AutoModelWithLMHead,
    '''summarization''': AutoModelForSeq2SeqLM,
    '''translation''': AutoModelForSeq2SeqLM,
}
# update this and the import above to support new schedulers from transformers.optimization
arg_to_scheduler = {
'''linear''': get_linear_schedule_with_warmup,
'''cosine''': get_cosine_schedule_with_warmup,
'''cosine_w_restarts''': get_cosine_with_hard_restarts_schedule_with_warmup,
'''polynomial''': get_polynomial_decay_schedule_with_warmup,
# '': get_constant_schedule, # not supported for now
# '': get_constant_schedule_with_warmup, # not supported for now
}
arg_to_scheduler_choices = sorted(arg_to_scheduler.keys())
arg_to_scheduler_metavar = '''{''' + ''', '''.join(arg_to_scheduler_choices) + '''}'''
class BaseTransformer ( pl.LightningModule ):
'''simple docstring'''
def __init__( self : Union[str, Any] , _A : argparse.Namespace , _A : List[Any]=None , _A : Any="base" , _A : Tuple=None , _A : Union[str, Any]=None , _A : List[Any]=None , **_A : Optional[Any] , ) -> Optional[int]:
super().__init__()
# TODO: move to self.save_hyperparameters()
# self.save_hyperparameters()
# can also expand arguments into trainer signature for easier reading
self.save_hyperparameters(_A )
__magic_name__ : List[str] = 0
__magic_name__ : Union[str, Any] = Path(self.hparams.output_dir )
__magic_name__ : str = self.hparams.cache_dir if self.hparams.cache_dir else None
if config is None:
__magic_name__ : Optional[Any] = AutoConfig.from_pretrained(
self.hparams.config_name if self.hparams.config_name else self.hparams.model_name_or_path , **({'num_labels': num_labels} if num_labels is not None else {}) , cache_dir=_A , **_A , )
else:
__magic_name__ : PretrainedConfig = config
__magic_name__ : Any = ('encoder_layerdrop', 'decoder_layerdrop', 'dropout', 'attention_dropout')
for p in extra_model_params:
if getattr(self.hparams , _A , _A ):
assert hasattr(self.config , _A ), F'model config doesn\'t have a `{p}` attribute'
setattr(self.config , _A , getattr(self.hparams , _A ) )
if tokenizer is None:
__magic_name__ : List[Any] = AutoTokenizer.from_pretrained(
self.hparams.tokenizer_name if self.hparams.tokenizer_name else self.hparams.model_name_or_path , cache_dir=_A , )
else:
__magic_name__ : PreTrainedTokenizer = tokenizer
__magic_name__ : Optional[int] = MODEL_MODES[mode]
if model is None:
__magic_name__ : Tuple = self.model_type.from_pretrained(
self.hparams.model_name_or_path , from_tf=bool('.ckpt' in self.hparams.model_name_or_path ) , config=self.config , cache_dir=_A , )
else:
__magic_name__ : str = model
def __lowerCAmelCase ( self : Optional[int] , *_A : Union[str, Any] , **_A : Union[str, Any] ) -> Tuple:
__magic_name__ : Any = self.model_type.from_pretrained(*_A , **_A )
def __lowerCAmelCase ( self : Dict ) -> Union[str, Any]:
__magic_name__ : Optional[Any] = arg_to_scheduler[self.hparams.lr_scheduler]
__magic_name__ : str = get_schedule_func(
self.opt , num_warmup_steps=self.hparams.warmup_steps , num_training_steps=self.total_steps() )
__magic_name__ : int = {'scheduler': scheduler, 'interval': 'step', 'frequency': 1}
return scheduler
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : Optional[Any] = self.model
__magic_name__ : int = ['bias', 'LayerNorm.weight']
__magic_name__ : Dict = [
{
'params': [
p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay )
            ], # check these named parameters
'weight_decay': self.hparams.weight_decay,
},
{
'params': [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay )],
'weight_decay': 0.0,
},
]
if self.hparams.adafactor:
__magic_name__ : str = Adafactor(
_A , lr=self.hparams.learning_rate , scale_parameter=_A , relative_step=_A )
else:
__magic_name__ : Tuple = AdamW(
_A , lr=self.hparams.learning_rate , eps=self.hparams.adam_epsilon )
__magic_name__ : List[str] = optimizer
__magic_name__ : int = self.get_lr_scheduler()
return [optimizer], [scheduler]
def __lowerCAmelCase ( self : Optional[Any] , _A : Optional[int] , _A : Tuple ) -> Optional[Any]:
return self.validation_step(_A , _A )
def __lowerCAmelCase ( self : Dict , _A : List[str] ) -> Any:
return self.validation_end(_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> int:
__magic_name__ : int = max(1 , self.hparams.gpus ) # TODO: consider num_tpu_cores
__magic_name__ : Dict = self.hparams.train_batch_size * self.hparams.accumulate_grad_batches * num_devices
return (self.dataset_size / effective_batch_size) * self.hparams.max_epochs
def __lowerCAmelCase ( self : str , _A : Optional[int] ) -> str:
if stage == "test":
__magic_name__ : Any = len(self.test_dataloader().dataset )
else:
__magic_name__ : List[Any] = self.get_dataloader('train' , self.hparams.train_batch_size , shuffle=_A )
__magic_name__ : int = len(self.train_dataloader().dataset )
def __lowerCAmelCase ( self : List[str] , _A : str , _A : int , _A : bool = False ) -> Optional[int]:
raise NotImplementedError('You must implement this for your task' )
def __lowerCAmelCase ( self : int ) -> List[str]:
return self.train_loader
def __lowerCAmelCase ( self : Tuple ) -> int:
return self.get_dataloader('dev' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[Any]:
return self.get_dataloader('test' , self.hparams.eval_batch_size , shuffle=_A )
def __lowerCAmelCase ( self : Optional[Any] , _A : Any ) -> str:
return os.path.join(
self.hparams.data_dir , 'cached_{}_{}_{}'.format(
_A , list(filter(_A , self.hparams.model_name_or_path.split('/' ) ) ).pop() , str(self.hparams.max_seq_length ) , ) , )
@pl.utilities.rank_zero_only
def __lowerCAmelCase ( self : List[str] , _A : Dict[str, Any] ) -> None:
__magic_name__ : Dict = self.output_dir.joinpath('best_tfmr' )
__magic_name__ : List[Any] = self.step_count
self.model.save_pretrained(_A )
self.tokenizer.save_pretrained(_A )
@staticmethod
def __lowerCAmelCase ( _A : List[str] , _A : Optional[Any] ) -> Tuple:
parser.add_argument(
'--model_name_or_path' , default=_A , type=_A , required=_A , help='Path to pretrained model or model identifier from huggingface.co/models' , )
parser.add_argument(
'--config_name' , default='' , type=_A , help='Pretrained config name or path if not the same as model_name' )
parser.add_argument(
'--tokenizer_name' , default=_A , type=_A , help='Pretrained tokenizer name or path if not the same as model_name' , )
parser.add_argument(
'--cache_dir' , default=str(Path(_A ).parent / 'test_run' / 'cache' ) , type=_A , help='Where do you want to store the pre-trained models downloaded from huggingface.co' , )
parser.add_argument(
'--encoder_layerdrop' , type=_A , help='Encoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--decoder_layerdrop' , type=_A , help='Decoder layer dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--dropout' , type=_A , help='Dropout probability (Optional). Goes into model.config' , )
parser.add_argument(
'--attention_dropout' , type=_A , help='Attention dropout probability (Optional). Goes into model.config' , )
parser.add_argument('--learning_rate' , default=5E-5 , type=_A , help='The initial learning rate for Adam.' )
parser.add_argument(
'--lr_scheduler' , default='linear' , choices=_A , metavar=_A , type=_A , help='Learning rate scheduler' , )
parser.add_argument('--weight_decay' , default=0.0 , type=_A , help='Weight decay if we apply some.' )
parser.add_argument('--adam_epsilon' , default=1E-8 , type=_A , help='Epsilon for Adam optimizer.' )
parser.add_argument('--warmup_steps' , default=0 , type=_A , help='Linear warmup over warmup_steps.' )
parser.add_argument('--num_workers' , default=4 , type=_A , help='kwarg passed to DataLoader' )
parser.add_argument('--num_train_epochs' , dest='max_epochs' , default=3 , type=_A )
parser.add_argument('--train_batch_size' , default=32 , type=_A )
parser.add_argument('--eval_batch_size' , default=32 , type=_A )
parser.add_argument('--adafactor' , action='store_true' )
class InitCallback ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : List[Any] , _A : List[Any] ) -> List[str]:
if (
trainer.is_global_zero and trainer.global_rank == 0
        ): # we initialize the retriever only on the master worker with RAY. In new pytorch-lightning, accelerators are removed.
pl_module.model.rag.retriever.init_retrieval() # better to use hook functions.
class CheckParamCallback ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : List[str] , _A : Dict , _A : str ) -> List[str]:
# print(pl_module.model.rag)
for name, param in pl_module.model.rag.named_parameters():
if param.grad is None:
print(_A )
class LoggingCallback ( pl.Callback ):
'''simple docstring'''
def __lowerCAmelCase ( self : Optional[int] , _A : List[Any] , _A : Dict ) -> Optional[Any]:
__magic_name__ : Dict = trainer.lr_schedulers[0]['scheduler']
__magic_name__ : int = {F'lr_group_{i}': lr for i, lr in enumerate(lr_scheduler.get_lr() )}
pl_module.logger.log_metrics(_A )
def __lowerCAmelCase ( self : Any , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[int]:
rank_zero_info('***** Validation results *****' )
__magic_name__ : str = trainer.callback_metrics
# Log results
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def __lowerCAmelCase ( self : Union[str, Any] , _A : pl.Trainer , _A : pl.LightningModule ) -> Optional[Any]:
rank_zero_info('***** Test results *****' )
__magic_name__ : Optional[int] = trainer.callback_metrics
# Log and save results to file
__magic_name__ : Optional[Any] = os.path.join(pl_module.hparams.output_dir , 'test_results.txt' )
with open(_A , 'w' ) as writer:
for key in sorted(_A ):
if key not in ["log", "progress_bar"]:
rank_zero_info('{} = {}\n'.format(_A , str(metrics[key] ) ) )
writer.write('{} = {}\n'.format(_A , str(metrics[key] ) ) )
def add_generic_args ( parser , root_dir ) -> None:
    """simple docstring"""
    parser.add_argument(
        '--output_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'model_checkpoints' ) , type=str , help='The output directory where the model predictions and checkpoints will be written.' , )
    parser.add_argument(
        '--fp16' , action='store_true' , help='Whether to use 16-bit (mixed) precision (through NVIDIA apex) instead of 32-bit' , )
    parser.add_argument(
        '--fp16_opt_level' , type=str , default='O2' , help=(
            'For fp16: Apex AMP optimization level selected in [\'O0\', \'O1\', \'O2\', and \'O3\'].'
            'See details at https://nvidia.github.io/apex/amp.html'
        ) , )
    parser.add_argument('--n_tpu_cores' , dest='tpu_cores' , type=int )
    parser.add_argument('--max_grad_norm' , dest='gradient_clip_val' , default=1.0 , type=float , help='Max gradient norm' )
    parser.add_argument('--do_train' , action='store_true' , help='Whether to run training.' )
    parser.add_argument('--do_predict' , action='store_true' , help='Whether to run predictions on the test set.' )
    parser.add_argument(
        '--gradient_accumulation_steps' , dest='accumulate_grad_batches' , type=int , default=1 , help='Number of updates steps to accumulate before performing a backward/update pass.' , )
    parser.add_argument('--seed' , type=int , default=42 , help='random seed for initialization' )
    parser.add_argument(
        '--data_dir' , default=str(Path(root_dir ).parent / 'test_run' / 'dummy-train-data' ) , type=str , help='The input data dir. Should contain the training files for the CoNLL-2003 NER task.' , )
def generic_train ( model : BaseTransformer , args : argparse.Namespace , early_stopping_callback=None , logger=True , extra_callbacks=[] , checkpoint_callback=None , logging_callback=None , **extra_train_kwargs ):
    """simple docstring"""
    pl.seed_everything(args.seed )
    # init model
    odir = Path(model.hparams.output_dir )
    odir.mkdir(exist_ok=True )
    # add custom checkpoints
    if checkpoint_callback is None:
        checkpoint_callback = pl.callbacks.ModelCheckpoint(
            filepath=args.output_dir , prefix='checkpoint' , monitor='val_loss' , mode='min' , save_top_k=1 )
    if early_stopping_callback:
        extra_callbacks.append(early_stopping_callback )
    if logging_callback is None:
        logging_callback = LoggingCallback()
    train_params = {}
    if args.fp16:
        train_params['precision'] = 16
    if args.gpus > 1:
        train_params['accelerator'] = 'auto'
        train_params['strategy'] = 'ddp'
    train_params['accumulate_grad_batches'] = args.accumulate_grad_batches
    train_params['profiler'] = None
    train_params['devices'] = 'auto'
    trainer = pl.Trainer.from_argparse_args(
        args , weights_summary=None , callbacks=[logging_callback] + extra_callbacks + [InitCallback()] + [checkpoint_callback] , logger=logger , val_check_interval=1 , num_sanity_val_steps=2 , **train_params , )
    if args.do_train:
        trainer.fit(model )
    else:
        print('RAG modeling tests with new set functions successfully executed!' )
    return trainer
| 331 | 0 |
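# Minimal sketch (plain torch, no Lightning) of the configure_optimizers contract used
# by BaseTransformer above: two parameter groups so that biases and LayerNorm weights
# are excluded from weight decay, plus a scheduler dict that steps once per batch.
import torch

tiny_model = torch.nn.Linear(4, 2)
no_decay = ["bias", "LayerNorm.weight"]
grouped_parameters = [
    {
        "params": [p for n, p in tiny_model.named_parameters() if not any(nd in n for nd in no_decay)],
        "weight_decay": 0.01,
    },
    {
        "params": [p for n, p in tiny_model.named_parameters() if any(nd in n for nd in no_decay)],
        "weight_decay": 0.0,
    },
]
optimizer = torch.optim.AdamW(grouped_parameters, lr=5e-5, eps=1e-8)
# Lightning interprets this dict as "call scheduler.step() after every training batch":
lr_dict = {"scheduler": torch.optim.lr_scheduler.LinearLR(optimizer), "interval": "step", "frequency": 1}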
import requests
def send_slack_message ( message_body : str , slack_url : str ) -> None:
    headers = {'''Content-Type''': '''application/json'''}
    response = requests.post(slack_url , json={'''text''': message_body} , headers=headers )
    if response.status_code != 200:
        msg = (
            '''Request to slack returned an error '''
            f"""{response.status_code}, the response is:\n{response.text}"""
        )
        raise ValueError(msg )
if __name__ == "__main__":
# Set the slack url to the one provided by Slack when you create the webhook at
# https://my.slack.com/services/new/incoming-webhook/
send_slack_message('<YOUR MESSAGE BODY>', '<SLACK CHANNEL URL>')
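# Usage sketch (the environment variable name here is an assumption, not part of the
# original script): incoming webhooks answer HTTP 200 on success, which is exactly
# what the status-code check above relies on.
import os

webhook_url = os.environ.get("SLACK_WEBHOOK_URL")
if webhook_url is not None:
    send_slack_message("CI pipeline finished", webhook_url)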
| 24 |
'''simple docstring'''
import torch
from diffusers import DDPMScheduler
from .test_schedulers import SchedulerCommonTest
class _lowerCamelCase ( SchedulerCommonTest ):
'''simple docstring'''
A_ : Dict = (DDPMScheduler,)
def __lowerCAmelCase ( self : Any , **_A : Dict ) -> str:
        config = {
'num_train_timesteps': 1000,
'beta_start': 0.0001,
'beta_end': 0.02,
'beta_schedule': 'linear',
'variance_type': 'fixed_small',
'clip_sample': True,
}
config.update(**_A )
return config
def __lowerCAmelCase ( self : str ) -> Union[str, Any]:
for timesteps in [1, 5, 100, 1000]:
self.check_over_configs(num_train_timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
for beta_start, beta_end in zip([0.0001, 0.001, 0.01, 0.1] , [0.002, 0.02, 0.2, 2] ):
self.check_over_configs(beta_start=_A , beta_end=_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> List[Any]:
for schedule in ["linear", "squaredcos_cap_v2"]:
self.check_over_configs(beta_schedule=_A )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for variance in ["fixed_small", "fixed_large", "other"]:
self.check_over_configs(variance_type=_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
for clip_sample in [True, False]:
self.check_over_configs(clip_sample=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> str:
self.check_over_configs(thresholding=_A )
for threshold in [0.5, 1.0, 2.0]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(
thresholding=_A , prediction_type=_A , sample_max_value=_A , )
def __lowerCAmelCase ( self : Tuple ) -> List[str]:
for prediction_type in ["epsilon", "sample", "v_prediction"]:
self.check_over_configs(prediction_type=_A )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[str]:
for t in [0, 500, 999]:
self.check_over_forward(time_step=_A )
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Dict = scheduler_class(**_A )
assert torch.sum(torch.abs(scheduler._get_variance(0 ) - 0.0 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(487 ) - 0.0_0979 ) ) < 1E-5
assert torch.sum(torch.abs(scheduler._get_variance(999 ) - 0.02 ) ) < 1E-5
def __lowerCAmelCase ( self : Tuple ) -> int:
__magic_name__ : Tuple = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : str = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Union[str, Any] = self.dummy_model()
__magic_name__ : List[Any] = self.dummy_sample_deter
__magic_name__ : Optional[Any] = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : Tuple = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Union[str, Any] = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : Dict = pred_prev_sample
__magic_name__ : Union[str, Any] = torch.sum(torch.abs(_A ) )
__magic_name__ : Dict = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 258.9606 ) < 1E-2
assert abs(result_mean.item() - 0.3372 ) < 1E-3
def __lowerCAmelCase ( self : Tuple ) -> Optional[int]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config(prediction_type='v_prediction' )
__magic_name__ : Any = scheduler_class(**_A )
__magic_name__ : Any = len(_A )
__magic_name__ : Dict = self.dummy_model()
__magic_name__ : str = self.dummy_sample_deter
__magic_name__ : str = torch.manual_seed(0 )
for t in reversed(range(_A ) ):
# 1. predict noise residual
__magic_name__ : List[Any] = model(_A , _A )
# 2. predict previous mean of sample x_t-1
__magic_name__ : Tuple = scheduler.step(_A , _A , _A , generator=_A ).prev_sample
# if t > 0:
# noise = self.dummy_sample_deter
# variance = scheduler.get_variance(t) ** (0.5) * noise
#
# sample = pred_prev_sample + variance
__magic_name__ : List[Any] = pred_prev_sample
__magic_name__ : int = torch.sum(torch.abs(_A ) )
__magic_name__ : Any = torch.mean(torch.abs(_A ) )
assert abs(result_sum.item() - 202.0296 ) < 1E-2
assert abs(result_mean.item() - 0.2631 ) < 1E-3
def __lowerCAmelCase ( self : List[str] ) -> str:
__magic_name__ : Dict = self.scheduler_classes[0]
__magic_name__ : Any = self.get_scheduler_config()
__magic_name__ : Optional[Any] = scheduler_class(**_A )
__magic_name__ : List[str] = [100, 87, 50, 1, 0]
scheduler.set_timesteps(timesteps=_A )
__magic_name__ : List[str] = scheduler.timesteps
for i, timestep in enumerate(_A ):
if i == len(_A ) - 1:
__magic_name__ : Optional[int] = -1
else:
__magic_name__ : List[Any] = timesteps[i + 1]
__magic_name__ : Union[str, Any] = scheduler.previous_timestep(_A )
__magic_name__ : Any = prev_t.item()
self.assertEqual(_A , _A )
def __lowerCAmelCase ( self : Tuple ) -> str:
__magic_name__ : str = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 51, 0]
with self.assertRaises(_A , msg='`custom_timesteps` must be in descending order.' ):
scheduler.set_timesteps(timesteps=_A )
def __lowerCAmelCase ( self : Optional[int] ) -> int:
__magic_name__ : Union[str, Any] = self.scheduler_classes[0]
__magic_name__ : Union[str, Any] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Optional[int] = [100, 87, 50, 1, 0]
__magic_name__ : Tuple = len(_A )
with self.assertRaises(_A , msg='Can only pass one of `num_inference_steps` or `custom_timesteps`.' ):
scheduler.set_timesteps(num_inference_steps=_A , timesteps=_A )
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
__magic_name__ : List[Any] = self.scheduler_classes[0]
__magic_name__ : List[str] = self.get_scheduler_config()
__magic_name__ : Union[str, Any] = scheduler_class(**_A )
__magic_name__ : Tuple = [scheduler.config.num_train_timesteps]
with self.assertRaises(
            _A , msg='`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}' , ):
            scheduler.set_timesteps(timesteps=_A )
| 331 | 0 |
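# Sketch of the full reverse-diffusion loop the tests above exercise (assumes the
# `diffusers` package is installed; the zero "residual" stands in for a real UNet call).
import torch
from diffusers import DDPMScheduler

scheduler = DDPMScheduler(num_train_timesteps=1000)
scheduler.set_timesteps(num_inference_steps=50)
sample = torch.randn(1, 3, 8, 8, generator=torch.manual_seed(0))
generator = torch.manual_seed(0)
for t in scheduler.timesteps:
    residual = torch.zeros_like(sample)  # stand-in for model(sample, t)
    sample = scheduler.step(residual, t, sample, generator=generator).prev_sample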
"""simple docstring"""
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_tf_available, is_torch_available
_import_structure = {
'configuration_ctrl': ['CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP', 'CTRLConfig'],
'tokenization_ctrl': ['CTRLTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_ctrl'] = [
'CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'CTRLForSequenceClassification',
'CTRLLMHeadModel',
'CTRLModel',
'CTRLPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_ctrl'] = [
'TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFCTRLForSequenceClassification',
'TFCTRLLMHeadModel',
'TFCTRLModel',
'TFCTRLPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_ctrl import CTRL_PRETRAINED_CONFIG_ARCHIVE_MAP, CTRLConfig
from .tokenization_ctrl import CTRLTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_ctrl import (
CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
CTRLForSequenceClassification,
CTRLLMHeadModel,
CTRLModel,
CTRLPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_ctrl import (
TF_CTRL_PRETRAINED_MODEL_ARCHIVE_LIST,
TFCTRLForSequenceClassification,
TFCTRLLMHeadModel,
TFCTRLModel,
TFCTRLPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
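# Standard-library sketch of what the lazy-import pattern above buys: the submodule is
# imported only when one of its attributes is first accessed, so package import stays
# cheap even when optional backends (torch, tensorflow) are missing.
import importlib
import types

class LazyProxy(types.ModuleType):
    def __init__(self, name: str, target: str):
        super().__init__(name)
        self._target = target

    def __getattr__(self, attr: str):
        real_module = importlib.import_module(self._target)  # deferred until first use
        return getattr(real_module, attr)

lazy_json = LazyProxy("lazy_json", "json")
print(lazy_json.dumps({"lazy": True}))  # "json" is imported here, not at construction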
| 25 |
'''simple docstring'''
import random
import unittest
import torch
from diffusers import IFInpaintingPipeline
from diffusers.utils import floats_tensor
from diffusers.utils.import_utils import is_xformers_available
from diffusers.utils.testing_utils import skip_mps, torch_device
from ..pipeline_params import (
TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS,
TEXT_GUIDED_IMAGE_INPAINTING_PARAMS,
)
from ..test_pipelines_common import PipelineTesterMixin
from . import IFPipelineTesterMixin
@skip_mps
class _lowerCamelCase ( PipelineTesterMixin , IFPipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = IFInpaintingPipeline
A_ : int = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"""width""", """height"""}
A_ : Any = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS
A_ : Union[str, Any] = PipelineTesterMixin.required_optional_params - {"""latents"""}
def __lowerCAmelCase ( self : Tuple ) -> Union[str, Any]:
return self._get_dummy_components()
def __lowerCAmelCase ( self : Optional[int] , _A : Dict , _A : Optional[int]=0 ) -> List[Any]:
if str(_A ).startswith('mps' ):
__magic_name__ : Optional[Any] = torch.manual_seed(_A )
else:
__magic_name__ : Tuple = torch.Generator(device=_A ).manual_seed(_A )
__magic_name__ : List[str] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Optional[int] = floats_tensor((1, 3, 32, 32) , rng=random.Random(_A ) ).to(_A )
__magic_name__ : Tuple = {
'prompt': 'A painting of a squirrel eating a burger',
'image': image,
'mask_image': mask_image,
'generator': generator,
'num_inference_steps': 2,
'output_type': 'numpy',
}
return inputs
@unittest.skipIf(
torch_device != 'cuda' or not is_xformers_available() , reason='XFormers attention is only available with CUDA and `xformers` installed' , )
def __lowerCAmelCase ( self : List[Any] ) -> int:
self._test_xformers_attention_forwardGenerator_pass(expected_max_diff=1E-3 )
def __lowerCAmelCase ( self : Optional[Any] ) -> List[Any]:
self._test_save_load_optional_components()
@unittest.skipIf(torch_device != 'cuda' , reason='float16 requires CUDA' )
def __lowerCAmelCase ( self : Dict ) -> Any:
# Due to non-determinism in save load of the hf-internal-testing/tiny-random-t5 text encoder
super().test_save_load_floataa(expected_max_diff=1E-1 )
def __lowerCAmelCase ( self : Tuple ) -> int:
self._test_attention_slicing_forward_pass(expected_max_diff=1E-2 )
def __lowerCAmelCase ( self : Optional[int] ) -> List[str]:
self._test_save_load_local()
def __lowerCAmelCase ( self : Any ) -> int:
self._test_inference_batch_single_identical(
            expected_max_diff=1E-2 , )
| 331 | 0 |
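# Sketch of the device-aware seeding idiom from get_dummy_inputs above: at the time
# these tests were written, torch.Generator(device="mps") was unsupported, so MPS runs
# fall back to seeding the global default generator instead.
import torch

def make_generator(device: str, seed: int = 0) -> torch.Generator:
    if str(device).startswith("mps"):
        return torch.manual_seed(seed)  # returns the (re)seeded default generator
    return torch.Generator(device=device).manual_seed(seed)

print(torch.randn(2, generator=make_generator("cpu")))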
import json
import pathlib
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision, slow
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import DetrImageProcessor
class DetrImageProcessingTester ( unittest.TestCase ):
    def __init__( self , parent , batch_size=7 , num_channels=3 , min_resolution=30 , max_resolution=400 , do_resize=True , size=None , do_rescale=True , rescale_factor=1 / 255 , do_normalize=True , image_mean=[0.5, 0.5, 0.5] , image_std=[0.5, 0.5, 0.5] , do_pad=True , ) -> Dict:
        # by setting size["longest_edge"] > max_resolution we're effectively not testing this :p
        size = size if size is not None else {"""shortest_edge""": 18, """longest_edge""": 1333}
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.min_resolution = min_resolution
        self.max_resolution = max_resolution
        self.do_resize = do_resize
        self.size = size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean
        self.image_std = image_std
        self.do_pad = do_pad
    def prepare_image_processor_dict ( self ) -> Dict:
return {
"do_resize": self.do_resize,
"size": self.size,
"do_rescale": self.do_rescale,
"rescale_factor": self.rescale_factor,
"do_normalize": self.do_normalize,
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_pad": self.do_pad,
}
    def get_expected_values ( self , image_inputs , batched=False ) -> str:
        if not batched:
            image = image_inputs[0]
            if isinstance(image , Image.Image ):
                w , h = image.size
            else:
                h , w = image.shape[1], image.shape[2]
            if w < h:
                expected_height = int(self.size["""shortest_edge"""] * h / w )
                expected_width = self.size["""shortest_edge"""]
            elif w > h:
                expected_height = self.size["""shortest_edge"""]
                expected_width = int(self.size["""shortest_edge"""] * w / h )
            else:
                expected_height = self.size["""shortest_edge"""]
                expected_width = self.size["""shortest_edge"""]
        else:
            expected_values = []
            for image in image_inputs:
                expected_height , expected_width = self.get_expected_values([image] )
                expected_values.append((expected_height, expected_width) )
            expected_height = max(expected_values , key=lambda item : item[0] )[0]
            expected_width = max(expected_values , key=lambda item : item[1] )[1]
        return expected_height, expected_width
@require_torch
@require_vision
class lowercase ( ImageProcessingSavingTestMixin , unittest.TestCase ):
_a = DetrImageProcessor if is_vision_available() else None
    def setUp ( self ) -> None:
        self.image_processor_tester = DetrImageProcessingTester(self )
@property
    def image_processor_dict ( self ):
return self.image_processor_tester.prepare_image_processor_dict()
def a__ ( self ) -> Optional[Any]:
_A : Optional[int] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a , """image_mean""" ) )
self.assertTrue(hasattr(_a , """image_std""" ) )
self.assertTrue(hasattr(_a , """do_normalize""" ) )
self.assertTrue(hasattr(_a , """do_rescale""" ) )
self.assertTrue(hasattr(_a , """rescale_factor""" ) )
self.assertTrue(hasattr(_a , """do_resize""" ) )
self.assertTrue(hasattr(_a , """size""" ) )
self.assertTrue(hasattr(_a , """do_pad""" ) )
def a__ ( self ) -> str:
_A : Optional[Any] = self.image_processing_class.from_dict(self.image_processor_dict )
self.assertEqual(image_processor.size , {"""shortest_edge""": 18, """longest_edge""": 1333} )
self.assertEqual(image_processor.do_pad , _a )
_A : Any = self.image_processing_class.from_dict(
self.image_processor_dict , size=42 , max_size=84 , pad_and_return_pixel_mask=_a )
self.assertEqual(image_processor.size , {"""shortest_edge""": 42, """longest_edge""": 84} )
self.assertEqual(image_processor.do_pad , _a )
def a__ ( self ) -> Optional[int]:
pass
def a__ ( self ) -> int:
# Initialize image_processing
_A : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
_A : Tuple = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a , Image.Image )
# Test not batched input
_A : int = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_A , _A : int = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A , _A : List[Any] = self.image_processor_tester.get_expected_values(_a , batched=_a )
_A : Dict = image_processing(_a , return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ) -> List[Any]:
# Initialize image_processing
_A : int = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
_A : List[str] = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a , np.ndarray )
# Test not batched input
_A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_A , _A : Optional[int] = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A : Union[str, Any] = image_processing(_a , return_tensors="""pt""" ).pixel_values
_A , _A : Dict = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
def a__ ( self ) -> Optional[int]:
# Initialize image_processing
_A : List[str] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
_A : Any = prepare_image_inputs(self.image_processor_tester , equal_resolution=_a , torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a , torch.Tensor )
# Test not batched input
_A : List[Any] = image_processing(image_inputs[0] , return_tensors="""pt""" ).pixel_values
_A , _A : str = self.image_processor_tester.get_expected_values(_a )
self.assertEqual(
encoded_images.shape , (1, self.image_processor_tester.num_channels, expected_height, expected_width) , )
# Test batched
_A : Tuple = image_processing(_a , return_tensors="""pt""" ).pixel_values
_A , _A : List[str] = self.image_processor_tester.get_expected_values(_a , batched=_a )
self.assertEqual(
encoded_images.shape , (
self.image_processor_tester.batch_size,
self.image_processor_tester.num_channels,
expected_height,
expected_width,
) , )
@slow
def a__ ( self ) -> Tuple:
# prepare image and target
_A : Tuple = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_annotations.txt""" , """r""" ) as f:
_A : Union[str, Any] = json.loads(f.read() )
_A : List[str] = {"""image_id""": 3_9769, """annotations""": target}
# encode them
_A : List[Any] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50""" )
_A : Any = image_processing(images=_a , annotations=_a , return_tensors="""pt""" )
# verify pixel values
_A : Tuple = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _a )
_A : str = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _a , atol=1e-4 ) )
# verify area
_A : Dict = torch.tensor([5887.9600, 11250.2061, 489353.8438, 837122.7500, 147967.5156, 165732.3438] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _a ) )
# verify boxes
_A : str = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _a )
_A : List[Any] = torch.tensor([0.5503, 0.2765, 0.0604, 0.2215] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _a , atol=1e-3 ) )
# verify image_id
_A : Tuple = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _a ) )
# verify is_crowd
_A : str = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _a ) )
# verify class_labels
_A : int = torch.tensor([75, 75, 63, 65, 17, 17] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _a ) )
# verify orig_size
_A : List[Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _a ) )
# verify size
_A : Optional[Any] = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _a ) )
@slow
def a__ ( self ) -> Optional[int]:
# prepare image, target and masks_path
_A : str = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""" )
with open("""./tests/fixtures/tests_samples/COCO/coco_panoptic_annotations.txt""" , """r""" ) as f:
_A : Any = json.loads(f.read() )
_A : Optional[Any] = {"""file_name""": """000000039769.png""", """image_id""": 3_9769, """segments_info""": target}
_A : Tuple = pathlib.Path("""./tests/fixtures/tests_samples/COCO/coco_panoptic""" )
# encode them
_A : Optional[int] = DetrImageProcessor.from_pretrained("""facebook/detr-resnet-50-panoptic""" )
_A : Tuple = image_processing(images=_a , annotations=_a , masks_path=_a , return_tensors="""pt""" )
# verify pixel values
_A : str = torch.Size([1, 3, 800, 1066] )
self.assertEqual(encoding["""pixel_values"""].shape , _a )
_A : Dict = torch.tensor([0.2796, 0.3138, 0.3481] )
self.assertTrue(torch.allclose(encoding["""pixel_values"""][0, 0, 0, :3] , _a , atol=1e-4 ) )
# verify area
_A : List[Any] = torch.tensor([147979.6875, 165527.0469, 484638.5938, 11292.9375, 5879.6562, 7634.1147] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""area"""] , _a ) )
# verify boxes
_A : List[str] = torch.Size([6, 4] )
self.assertEqual(encoding["""labels"""][0]["""boxes"""].shape , _a )
_A : Any = torch.tensor([0.2625, 0.5437, 0.4688, 0.8625] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""boxes"""][0] , _a , atol=1e-3 ) )
# verify image_id
_A : Optional[Any] = torch.tensor([3_9769] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""image_id"""] , _a ) )
# verify is_crowd
_A : Union[str, Any] = torch.tensor([0, 0, 0, 0, 0, 0] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""iscrowd"""] , _a ) )
# verify class_labels
_A : Union[str, Any] = torch.tensor([17, 17, 63, 75, 75, 93] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""class_labels"""] , _a ) )
# verify masks
_A : List[str] = 82_2873
self.assertEqual(encoding["""labels"""][0]["""masks"""].sum().item() , _a )
# verify orig_size
_A : Union[str, Any] = torch.tensor([480, 640] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""orig_size"""] , _a ) )
# verify size
_A : Dict = torch.tensor([800, 1066] )
self.assertTrue(torch.allclose(encoding["""labels"""][0]["""size"""] , _a ) )
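# Sketch of the shortest-edge resize rule that get_expected_values re-implements above:
# scale so the shorter side lands exactly on `shortest_edge` while keeping the aspect
# ratio (the real processor additionally caps the longer side at `longest_edge`).
def shortest_edge_resize(h: int, w: int, shortest_edge: int = 18) -> tuple:
    if w < h:
        return int(shortest_edge * h / w), shortest_edge
    if w > h:
        return shortest_edge, int(shortest_edge * w / h)
    return shortest_edge, shortest_edge  # square input

assert shortest_edge_resize(400, 200) == (36, 18)
assert shortest_edge_resize(200, 400) == (18, 36)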
| 26 |
'''simple docstring'''
import unittest
from transformers import DebertaConfig, is_torch_available
from transformers.testing_utils import require_sentencepiece, require_tokenizers, require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
DebertaForMaskedLM,
DebertaForQuestionAnswering,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaModel,
)
from transformers.models.deberta.modeling_deberta import DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST
class DebertaModelTester ( object ):
'''simple docstring'''
def __init__( self : List[Any] , _A : str , _A : str=13 , _A : Union[str, Any]=7 , _A : Tuple=True , _A : Dict=True , _A : List[str]=True , _A : Optional[int]=True , _A : Dict=99 , _A : Optional[Any]=32 , _A : Optional[int]=5 , _A : str=4 , _A : str=37 , _A : Tuple="gelu" , _A : Any=0.1 , _A : Dict=0.1 , _A : str=512 , _A : Tuple=16 , _A : str=2 , _A : int=0.02 , _A : int=False , _A : List[str]=True , _A : List[Any]="None" , _A : List[str]=3 , _A : Optional[Any]=4 , _A : Dict=None , ) -> Dict:
__magic_name__ : Union[str, Any] = parent
__magic_name__ : Any = batch_size
__magic_name__ : Optional[int] = seq_length
__magic_name__ : List[str] = is_training
__magic_name__ : Optional[Any] = use_input_mask
__magic_name__ : Dict = use_token_type_ids
__magic_name__ : str = use_labels
__magic_name__ : int = vocab_size
__magic_name__ : List[Any] = hidden_size
__magic_name__ : Dict = num_hidden_layers
__magic_name__ : Dict = num_attention_heads
__magic_name__ : Tuple = intermediate_size
__magic_name__ : Any = hidden_act
__magic_name__ : Union[str, Any] = hidden_dropout_prob
__magic_name__ : Union[str, Any] = attention_probs_dropout_prob
__magic_name__ : List[Any] = max_position_embeddings
__magic_name__ : Any = type_vocab_size
__magic_name__ : Union[str, Any] = type_sequence_label_size
__magic_name__ : Union[str, Any] = initializer_range
__magic_name__ : str = num_labels
__magic_name__ : Tuple = num_choices
__magic_name__ : Any = relative_attention
__magic_name__ : str = position_biased_input
__magic_name__ : str = pos_att_type
__magic_name__ : Union[str, Any] = scope
def __lowerCAmelCase ( self : Optional[int] ) -> Union[str, Any]:
__magic_name__ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] , self.vocab_size )
__magic_name__ : List[Any] = None
if self.use_input_mask:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , vocab_size=2 )
__magic_name__ : int = None
if self.use_token_type_ids:
__magic_name__ : List[str] = ids_tensor([self.batch_size, self.seq_length] , self.type_vocab_size )
__magic_name__ : List[str] = None
__magic_name__ : Tuple = None
__magic_name__ : Union[str, Any] = None
if self.use_labels:
__magic_name__ : List[Any] = ids_tensor([self.batch_size] , self.type_sequence_label_size )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size, self.seq_length] , self.num_labels )
__magic_name__ : Optional[Any] = ids_tensor([self.batch_size] , self.num_choices )
__magic_name__ : Any = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def __lowerCAmelCase ( self : Tuple ) -> Optional[Any]:
return DebertaConfig(
vocab_size=self.vocab_size , hidden_size=self.hidden_size , num_hidden_layers=self.num_hidden_layers , num_attention_heads=self.num_attention_heads , intermediate_size=self.intermediate_size , hidden_act=self.hidden_act , hidden_dropout_prob=self.hidden_dropout_prob , attention_probs_dropout_prob=self.attention_probs_dropout_prob , max_position_embeddings=self.max_position_embeddings , type_vocab_size=self.type_vocab_size , initializer_range=self.initializer_range , relative_attention=self.relative_attention , position_biased_input=self.position_biased_input , pos_att_type=self.pos_att_type , )
    def __lowerCAmelCase ( self : str ) -> Optional[Any]:
        config = self.get_config()
        config.vocab_size = 300
        return config
def __lowerCAmelCase ( self : int , _A : Dict ) -> Tuple:
self.parent.assertListEqual(list(result.loss.size() ) , [] )
def __lowerCAmelCase ( self : Any , _A : Optional[int] , _A : Optional[Any] , _A : Optional[int] , _A : Optional[int] , _A : Any , _A : str , _A : List[Any] ) -> List[Any]:
__magic_name__ : Dict = DebertaModel(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[Any] = model(_A , attention_mask=_A , token_type_ids=_A )[0]
__magic_name__ : Optional[int] = model(_A , token_type_ids=_A )[0]
__magic_name__ : List[str] = model(_A )[0]
self.parent.assertListEqual(list(sequence_output.size() ) , [self.batch_size, self.seq_length, self.hidden_size] )
def __lowerCAmelCase ( self : Any , _A : Union[str, Any] , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Dict , _A : Optional[Any] , _A : Optional[int] ) -> Dict:
__magic_name__ : List[str] = DebertaForMaskedLM(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.vocab_size) )
def __lowerCAmelCase ( self : str , _A : Union[str, Any] , _A : List[str] , _A : Optional[int] , _A : Optional[int] , _A : str , _A : Union[str, Any] , _A : Any ) -> Union[str, Any]:
__magic_name__ : Optional[int] = self.num_labels
__magic_name__ : Optional[Any] = DebertaForSequenceClassification(_A )
model.to(_A )
model.eval()
__magic_name__ : Any = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertListEqual(list(result.logits.size() ) , [self.batch_size, self.num_labels] )
self.check_loss_output(_A )
def __lowerCAmelCase ( self : Tuple , _A : str , _A : str , _A : int , _A : str , _A : int , _A : Optional[int] , _A : List[str] ) -> Optional[int]:
__magic_name__ : str = self.num_labels
__magic_name__ : int = DebertaForTokenClassification(config=_A )
model.to(_A )
model.eval()
__magic_name__ : List[str] = model(_A , attention_mask=_A , token_type_ids=_A , labels=_A )
self.parent.assertEqual(result.logits.shape , (self.batch_size, self.seq_length, self.num_labels) )
def __lowerCAmelCase ( self : Optional[Any] , _A : str , _A : Tuple , _A : Optional[int] , _A : Any , _A : Optional[int] , _A : Dict , _A : Union[str, Any] ) -> List[Any]:
__magic_name__ : int = DebertaForQuestionAnswering(config=_A )
model.to(_A )
model.eval()
__magic_name__ : Optional[int] = model(
_A , attention_mask=_A , token_type_ids=_A , start_positions=_A , end_positions=_A , )
self.parent.assertEqual(result.start_logits.shape , (self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape , (self.batch_size, self.seq_length) )
def __lowerCAmelCase ( self : Optional[int] ) -> List[Any]:
__magic_name__ : Union[str, Any] = self.prepare_config_and_inputs()
(
(
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) , (
__magic_name__
) ,
) : int = config_and_inputs
__magic_name__ : Optional[Any] = {'input_ids': input_ids, 'token_type_ids': token_type_ids, 'attention_mask': input_mask}
return config, inputs_dict
@require_torch
class _lowerCamelCase ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
'''simple docstring'''
A_ : List[Any] = (
(
DebertaModel,
DebertaForMaskedLM,
DebertaForSequenceClassification,
DebertaForTokenClassification,
DebertaForQuestionAnswering,
)
if is_torch_available()
else ()
)
A_ : Tuple = (
{
"""feature-extraction""": DebertaModel,
"""fill-mask""": DebertaForMaskedLM,
"""question-answering""": DebertaForQuestionAnswering,
"""text-classification""": DebertaForSequenceClassification,
"""token-classification""": DebertaForTokenClassification,
"""zero-shot""": DebertaForSequenceClassification,
}
if is_torch_available()
else {}
)
A_ : Union[str, Any] = True
A_ : Any = False
A_ : Dict = False
A_ : str = False
A_ : Dict = False
def __lowerCAmelCase ( self : List[str] ) -> Optional[Any]:
__magic_name__ : List[str] = DebertaModelTester(self )
__magic_name__ : Tuple = ConfigTester(self , config_class=_A , hidden_size=37 )
def __lowerCAmelCase ( self : List[str] ) -> Tuple:
self.config_tester.run_common_tests()
def __lowerCAmelCase ( self : Optional[Any] ) -> Optional[int]:
__magic_name__ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_model(*_A )
def __lowerCAmelCase ( self : Union[str, Any] ) -> Optional[int]:
__magic_name__ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_sequence_classification(*_A )
def __lowerCAmelCase ( self : Any ) -> str:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_masked_lm(*_A )
def __lowerCAmelCase ( self : Any ) -> Tuple:
__magic_name__ : Optional[int] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_question_answering(*_A )
def __lowerCAmelCase ( self : str ) -> List[Any]:
__magic_name__ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_deberta_for_token_classification(*_A )
@slow
def __lowerCAmelCase ( self : str ) -> Optional[Any]:
for model_name in DEBERTA_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
__magic_name__ : int = DebertaModel.from_pretrained(_A )
self.assertIsNotNone(_A )
@require_torch
@require_sentencepiece
@require_tokenizers
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
@unittest.skip(reason='Model not available yet' )
def __lowerCAmelCase ( self : List[Any] ) -> Optional[int]:
pass
@slow
def __lowerCAmelCase ( self : Dict ) -> Tuple:
__magic_name__ : int = DebertaModel.from_pretrained('microsoft/deberta-base' )
__magic_name__ : List[Any] = torch.tensor([[0, 31414, 232, 328, 740, 1140, 12695, 69, 46078, 1588, 2]] )
__magic_name__ : Union[str, Any] = torch.tensor([[0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]] )
with torch.no_grad():
__magic_name__ : Optional[int] = model(_A , attention_mask=_A )[0]
# compare the actual values for a slice.
__magic_name__ : Tuple = torch.tensor(
[[[-0.5986, -0.8055, -0.8462], [1.4484, -0.9348, -0.8059], [0.3123, 0.0032, -1.4131]]] )
        self.assertTrue(torch.allclose(output[:, 1:4, 1:4] , _A , atol=1E-4 ) , F'{output[:, 1:4, 1:4]}' )
| 331 | 0 |
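# Plain-torch sketch of the ids_tensor helper these shape-only unit tests rely on:
# uniformly random token ids of a given shape are all the model needs to verify output
# shapes, so no tokenizer or real text is involved.
import torch

def ids_tensor(shape, vocab_size: int) -> torch.Tensor:
    return torch.randint(0, vocab_size, tuple(shape), dtype=torch.long)

input_ids = ids_tensor([13, 7], vocab_size=99)      # (batch_size, seq_length)
attention_mask = ids_tensor([13, 7], vocab_size=2)  # random 0/1 mask
print(input_ids.shape, attention_mask.shape)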
'''simple docstring'''
import gc
import random
import tempfile
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMInverseScheduler,
DDIMScheduler,
DPMSolverMultistepInverseScheduler,
DPMSolverMultistepScheduler,
StableDiffusionDiffEditPipeline,
    UNet2DConditionModel,
)
from diffusers.utils import load_image, slow
from diffusers.utils.testing_utils import enable_full_determinism, floats_tensor, require_torch_gpu, torch_device
from ..pipeline_params import TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS, TEXT_GUIDED_IMAGE_INPAINTING_PARAMS
from ..test_pipelines_common import PipelineLatentTesterMixin, PipelineTesterMixin
enable_full_determinism()
class __UpperCamelCase ( PipelineLatentTesterMixin , PipelineTesterMixin , unittest.TestCase ):
A_ = StableDiffusionDiffEditPipeline
A_ = TEXT_GUIDED_IMAGE_INPAINTING_PARAMS - {"height", "width", "image"} | {"image_latents"}
A_ = TEXT_GUIDED_IMAGE_INPAINTING_BATCH_PARAMS - {"image"} | {"image_latents"}
A_ = frozenset(
[] ) # TO-DO: update image_params once pipeline is refactored with VaeImageProcessor.preprocess
A_ = frozenset([] )
def __UpperCAmelCase ( self ):
'''simple docstring'''
torch.manual_seed(0 )
        __a : List[str] = UNet2DConditionModel(
block_out_channels=(32, 64) , layers_per_block=2 , sample_size=32 , in_channels=4 , out_channels=4 , down_block_types=('DownBlock2D', 'CrossAttnDownBlock2D') , up_block_types=('CrossAttnUpBlock2D', 'UpBlock2D') , cross_attention_dim=32 , attention_head_dim=(2, 4) , use_linear_projection=__a , )
__a : Tuple = DDIMScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__a , set_alpha_to_one=__a , )
__a : Any = DDIMInverseScheduler(
beta_start=0.00085 , beta_end=0.012 , beta_schedule='scaled_linear' , clip_sample=__a , set_alpha_to_zero=__a , )
torch.manual_seed(0 )
__a : Union[str, Any] = AutoencoderKL(
block_out_channels=[32, 64] , in_channels=3 , out_channels=3 , down_block_types=['DownEncoderBlock2D', 'DownEncoderBlock2D'] , up_block_types=['UpDecoderBlock2D', 'UpDecoderBlock2D'] , latent_channels=4 , sample_size=128 , )
torch.manual_seed(0 )
__a : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 , eos_token_id=2 , hidden_size=32 , intermediate_size=37 , layer_norm_eps=1E-0_5 , num_attention_heads=4 , num_hidden_layers=5 , pad_token_id=1 , vocab_size=1000 , hidden_act='gelu' , projection_dim=512 , )
__a : List[Any] = CLIPTextModel(__a )
__a : Any = CLIPTokenizer.from_pretrained('hf-internal-testing/tiny-random-clip' )
__a : List[Any] = {
'unet': unet,
'scheduler': scheduler,
'inverse_scheduler': inverse_scheduler,
'vae': vae,
'text_encoder': text_encoder,
'tokenizer': tokenizer,
'safety_checker': None,
'feature_extractor': None,
}
return components
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
__a : int = floats_tensor((1, 16, 16) , rng=random.Random(__a ) ).to(__a )
__a : Union[str, Any] = floats_tensor((1, 2, 4, 16, 16) , rng=random.Random(__a ) ).to(__a )
if str(__a ).startswith('mps' ):
__a : Any = torch.manual_seed(__a )
else:
__a : Optional[int] = torch.Generator(device=__a ).manual_seed(__a )
__a : Any = {
'prompt': 'a dog and a newt',
'mask_image': mask,
'image_latents': latents,
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
__a : Tuple = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__a : Optional[Any] = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a : List[str] = Image.fromarray(np.uint8(__a ) ).convert('RGB' )
if str(__a ).startswith('mps' ):
__a : Optional[int] = torch.manual_seed(__a )
else:
__a : Optional[int] = torch.Generator(device=__a ).manual_seed(__a )
__a : str = {
'image': image,
'source_prompt': 'a cat and a frog',
'target_prompt': 'a dog and a newt',
'generator': generator,
'num_inference_steps': 2,
'num_maps_per_mask': 2,
'mask_encode_strength': 1.0,
'guidance_scale': 6.0,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self , __a , __a=0 ):
'''simple docstring'''
__a : Union[str, Any] = floats_tensor((1, 3, 32, 32) , rng=random.Random(__a ) ).to(__a )
__a : int = image.cpu().permute(0 , 2 , 3 , 1 )[0]
        __a : str = Image.fromarray(np.uint8(__a ) ).convert('RGB' )
if str(__a ).startswith('mps' ):
__a : Optional[Any] = torch.manual_seed(__a )
else:
__a : List[str] = torch.Generator(device=__a ).manual_seed(__a )
__a : str = {
'image': image,
'prompt': 'a cat and a frog',
'generator': generator,
'num_inference_steps': 2,
'inpaint_strength': 1.0,
'guidance_scale': 6.0,
'decode_latents': True,
'output_type': 'numpy',
}
return inputs
def __UpperCAmelCase ( self ):
'''simple docstring'''
if not hasattr(self.pipeline_class , '_optional_components' ):
return
__a : Tuple = self.get_dummy_components()
__a : List[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
# set all optional components to None and update pipeline config accordingly
for optional_component in pipe._optional_components:
setattr(__a , __a , __a )
pipe.register_modules(**{optional_component: None for optional_component in pipe._optional_components} )
__a : Optional[Any] = self.get_dummy_inputs(__a )
__a : str = pipe(**__a )[0]
with tempfile.TemporaryDirectory() as tmpdir:
pipe.save_pretrained(__a )
__a : Any = self.pipeline_class.from_pretrained(__a )
pipe_loaded.to(__a )
pipe_loaded.set_progress_bar_config(disable=__a )
for optional_component in pipe._optional_components:
self.assertTrue(
getattr(__a , __a ) is None , f"""`{optional_component}` did not stay set to None after loading.""" , )
__a : str = self.get_dummy_inputs(__a )
__a : Tuple = pipe_loaded(**__a )[0]
__a : str = np.abs(output - output_loaded ).max()
self.assertLess(__a , 1E-4 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Tuple = 'cpu'
__a : Union[str, Any] = self.get_dummy_components()
__a : Optional[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : Any = self.get_dummy_mask_inputs(__a )
__a : Dict = pipe.generate_mask(**__a )
__a : Optional[int] = mask[0, -3:, -3:]
self.assertEqual(mask.shape , (1, 16, 16) )
__a : int = np.array([0] * 9 )
__a : str = np.abs(mask_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1E-3 )
self.assertEqual(mask[0, -3, -4] , 0 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = 'cpu'
__a : List[Any] = self.get_dummy_components()
__a : Optional[Any] = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : Dict = self.get_dummy_inversion_inputs(__a )
__a : Optional[int] = pipe.invert(**__a ).images
__a : Optional[int] = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__a : Optional[Any] = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
__a : str = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1E-3 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().test_inference_batch_single_identical(expected_max_diff=5E-3 )
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = 'cpu'
__a : str = self.get_dummy_components()
__a : str = {'beta_start': 0.00085, 'beta_end': 0.012, 'beta_schedule': 'scaled_linear'}
__a : Any = DPMSolverMultistepScheduler(**__a )
__a : Any = DPMSolverMultistepInverseScheduler(**__a )
__a : Tuple = self.pipeline_class(**__a )
pipe.to(__a )
pipe.set_progress_bar_config(disable=__a )
__a : str = self.get_dummy_inversion_inputs(__a )
__a : List[Any] = pipe.invert(**__a ).images
__a : Any = image[0, -1, -3:, -3:]
self.assertEqual(image.shape , (2, 32, 32, 3) )
__a : int = np.array(
[0.5150, 0.5134, 0.5043, 0.5376, 0.4694, 0.51050, 0.5015, 0.4407, 0.4799] , )
__a : Union[str, Any] = np.abs(image_slice.flatten() - expected_slice ).max()
self.assertLessEqual(__a , 1E-3 )
@require_torch_gpu
@slow
class __UpperCamelCase ( unittest.TestCase ):
def __UpperCAmelCase ( self ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@classmethod
def __UpperCAmelCase ( cls ):
'''simple docstring'''
__a : Optional[int] = load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/diffedit/fruit.png' )
__a : str = raw_image.convert('RGB' ).resize((768, 768) )
__a : str = raw_image
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Dict = torch.manual_seed(0 )
__a : Any = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=__a , torch_dtype=torch.float16 )
__a : Tuple = DDIMScheduler.from_config(pipe.scheduler.config )
__a : List[Any] = DDIMInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__a )
__a : int = 'a bowl of fruit'
__a : Dict = 'a bowl of pears'
__a : List[Any] = pipe.generate_mask(
image=self.raw_image , source_prompt=__a , target_prompt=__a , generator=__a , )
__a : str = pipe.invert(
prompt=__a , image=self.raw_image , inpaint_strength=0.7 , generator=__a ).latents
__a : Dict = pipe(
prompt=__a , mask_image=__a , image_latents=__a , generator=__a , negative_prompt=__a , inpaint_strength=0.7 , output_type='numpy' , ).images[0]
__a : str = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
def __UpperCAmelCase ( self ):
'''simple docstring'''
__a : Any = torch.manual_seed(0 )
__a : List[str] = StableDiffusionDiffEditPipeline.from_pretrained(
            'stabilityai/stable-diffusion-2-1' , safety_checker=__a , torch_dtype=torch.float16 )
__a : Union[str, Any] = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
__a : List[str] = DPMSolverMultistepInverseScheduler.from_config(pipe.scheduler.config )
pipe.enable_model_cpu_offload()
pipe.set_progress_bar_config(disable=__a )
__a : Dict = 'a bowl of fruit'
__a : Optional[Any] = 'a bowl of pears'
__a : Optional[int] = pipe.generate_mask(
image=self.raw_image , source_prompt=__a , target_prompt=__a , generator=__a , )
__a : Any = pipe.invert(
prompt=__a , image=self.raw_image , inpaint_strength=0.7 , generator=__a , num_inference_steps=25 , ).latents
__a : List[str] = pipe(
prompt=__a , mask_image=__a , image_latents=__a , generator=__a , negative_prompt=__a , inpaint_strength=0.7 , num_inference_steps=25 , output_type='numpy' , ).images[0]
__a : int = (
np.array(
load_image(
'https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main'
'/diffedit/pears.png' ).resize((768, 768) ) )
/ 255
)
assert np.abs((expected_image - image).max() ) < 5E-1
| 27 |
'''simple docstring'''
class matrix: # Public class to implement a graph
    '''simple docstring'''
    def __init__( self , row : int , col : int , graph : list[list[bool]] ) -> None:
        self.ROW = row
        self.COL = col
        self.graph = graph
    def is_safe( self , i : int , j : int , visited : list[list[bool]] ) -> bool:
        return (
            0 <= i < self.ROW
            and 0 <= j < self.COL
            and not visited[i][j]
            and self.graph[i][j]
        )
    def diffs( self , i : int , j : int , visited : list[list[bool]] ) -> None:
        # Checking all 8 elements surrounding nth element
        row_nbr = [-1, -1, -1, 0, 0, 1, 1, 1]  # Coordinate order
        col_nbr = [-1, 0, 1, -1, 1, -1, 0, 1]
        visited[i][j] = True  # Make those cells visited
        for k in range(8 ):
            if self.is_safe(i + row_nbr[k] , j + col_nbr[k] , visited ):
                self.diffs(i + row_nbr[k] , j + col_nbr[k] , visited )
    def count_islands( self ) -> int:  # And finally, count all islands.
        visited = [[False for j in range(self.COL )] for i in range(self.ROW )]
        count = 0
        for i in range(self.ROW ):
            for j in range(self.COL ):
                if visited[i][j] is False and self.graph[i][j] == 1:
                    self.diffs(i , j , visited )
                    count += 1
        return count | 331 | 0 |
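# --- Added usage sketch (not part of the original file): counting the
# 8-connected islands in a small boolean grid with the `matrix` class above.
# The grid and the expected count of 2 are illustrative.
if __name__ == "__main__":
    sample_graph = [
        [1, 1, 0, 0],
        [0, 1, 0, 0],
        [0, 0, 0, 1],
    ]
    print(matrix(3, 4, sample_graph).count_islands())  # -> 2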
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
_lowerCamelCase : Optional[Any] = ["MLukeTokenizer"]
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 28 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
is_vision_available,
)
_import_structure = {'''processing_layoutxlm''': ['''LayoutXLMProcessor''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm'''] = ['''LayoutXLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_layoutxlm_fast'''] = ['''LayoutXLMTokenizerFast''']
if TYPE_CHECKING:
from .processing_layoutxlm import LayoutXLMProcessor
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm import LayoutXLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_layoutxlm_fast import LayoutXLMTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure, module_spec=__spec__) | 331 | 0 |
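# --- Added illustration (hedged sketch, not from the original module): the
# `_import_structure` / `_LazyModule` pattern above boils down to PEP 562
# module-level `__getattr__`, which defers the real import until an exported
# name is first touched. The dict below reuses the LayoutXLM entry as an example;
# `_demo_import_structure` and `_attr_to_module` are illustrative names.
import importlib

_demo_import_structure = {"processing_layoutxlm": ["LayoutXLMProcessor"]}
_attr_to_module = {attr: mod for mod, attrs in _demo_import_structure.items() for attr in attrs}

def __getattr__(name):
    # Import e.g. `.processing_layoutxlm` lazily, then hand back the attribute.
    if name in _attr_to_module:
        module = importlib.import_module("." + _attr_to_module[name], __name__)
        return getattr(module, name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")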
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_sentencepiece_available
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_mluke'] = ['MLukeTokenizer']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mluke import MLukeTokenizer
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 29 |
'''simple docstring'''
from __future__ import annotations
from math import ceil, floor, sqrt
def solution( target : int = 200_0000 ):
    """simple docstring"""
    triangle_numbers: list[int] = [0]
    idx: int
    for idx in range(1 , ceil(sqrt(target * 2 ) * 1.1 ) ):
        triangle_numbers.append(triangle_numbers[-1] + idx )
    # we want this to be as close as possible to target
    best_product: int = 0
    # the area corresponding to the grid that gives the product closest to target
    area: int = 0
    # an estimate of b, using the quadratic formula
    b_estimate: float
    # the largest integer less than b_estimate
    b_floor: int
    # the smallest integer greater than b_estimate
    b_ceil: int
    # the triangle number corresponding to b_floor
    triangle_b_first_guess: int
    # the triangle number corresponding to b_ceil
    triangle_b_second_guess: int
    for idx_a, triangle_a in enumerate(triangle_numbers[1:] , 1 ):
        b_estimate = (-1 + sqrt(1 + 8 * target / triangle_a )) / 2
        b_floor = floor(b_estimate )
        b_ceil = ceil(b_estimate )
        triangle_b_first_guess = triangle_numbers[b_floor]
        triangle_b_second_guess = triangle_numbers[b_ceil]
        if abs(target - triangle_b_first_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_first_guess * triangle_a
            area = idx_a * b_floor
        if abs(target - triangle_b_second_guess * triangle_a ) < abs(
            target - best_product ):
            best_product = triangle_b_second_guess * triangle_a
            area = idx_a * b_ceil
    return area
if __name__ == "__main__":
    print(F'{solution() = }') | 331 | 0 |
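# --- Added background note (not in the original solution): an a x b grid
# contains T(a) * T(b) rectangles, where T(n) = n * (n + 1) / 2 is the n-th
# triangle number; a 3 x 2 grid therefore contains T(3) * T(2) = 6 * 3 = 18
# rectangles. Solving T(a) * b * (b + 1) / 2 = target for b with the quadratic
# formula yields the `b_estimate` used above:
#     b = (-1 + sqrt(1 + 8 * target / T(a))) / 2
def _rectangle_count(a: int, b: int) -> int:
    """Illustrative helper: number of rectangles contained in an a x b grid."""
    return (a * (a + 1) // 2) * (b * (b + 1) // 2)

assert _rectangle_count(3, 2) == 18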
from ...configuration_utils import PretrainedConfig
from ...utils import logging
logger = logging.get_logger(__name__)
MEGATRON_BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
# See all MEGATRON_BERT models at https://huggingface.co/models?filter=bert
}
class lowercase__( PretrainedConfig ):
    """simple docstring"""
    model_type = 'megatron-bert'
    def __init__( self , vocab_size=2_9_0_5_6 , hidden_size=1_0_2_4 , num_hidden_layers=2_4 , num_attention_heads=1_6 , intermediate_size=4_0_9_6 , hidden_act="gelu" , hidden_dropout_prob=0.1 , attention_probs_dropout_prob=0.1 , max_position_embeddings=5_1_2 , type_vocab_size=2 , initializer_range=0.02 , layer_norm_eps=1e-12 , pad_token_id=0 , position_embedding_type="absolute" , use_cache=True , **kwargs , ):
        super().__init__(pad_token_id=pad_token_id , **kwargs )
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.position_embedding_type = position_embedding_type
        self.use_cache = use_cache
| 30 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {'''configuration_xglm''': ['''XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP''', '''XGLMConfig''']}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm'''] = ['''XGLMTokenizer''']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''tokenization_xglm_fast'''] = ['''XGLMTokenizerFast''']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_xglm'''] = [
'''XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''XGLMForCausalLM''',
'''XGLMModel''',
'''XGLMPreTrainedModel''',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_flax_xglm'''] = [
'''FlaxXGLMForCausalLM''',
'''FlaxXGLMModel''',
'''FlaxXGLMPreTrainedModel''',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['''modeling_tf_xglm'''] = [
'''TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST''',
'''TFXGLMForCausalLM''',
'''TFXGLMModel''',
'''TFXGLMPreTrainedModel''',
]
if TYPE_CHECKING:
from .configuration_xglm import XGLM_PRETRAINED_CONFIG_ARCHIVE_MAP, XGLMConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm import XGLMTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_xglm_fast import XGLMTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_xglm import XGLM_PRETRAINED_MODEL_ARCHIVE_LIST, XGLMForCausalLM, XGLMModel, XGLMPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_xglm import FlaxXGLMForCausalLM, FlaxXGLMModel, FlaxXGLMPreTrainedModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_xglm import (
TF_XGLM_PRETRAINED_MODEL_ARCHIVE_LIST,
TFXGLMForCausalLM,
TFXGLMModel,
TFXGLMPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['''__file__'''], _import_structure) | 331 | 0 |
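# --- Added usage sketch for the Megatron-BERT config class (`lowercase__`)
# defined earlier in this record. The override values below are arbitrary
# illustrations, not recommended settings:
demo_config = lowercase__(hidden_size=5_1_2 , num_hidden_layers=8 , num_attention_heads=8 )
print(demo_config.model_type , demo_config.hidden_size )  # megatron-bert 512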
'''simple docstring'''
import os
import unittest
from huggingface_hub.utils import are_progress_bars_disabled
import transformers.models.bart.tokenization_bart
from transformers import logging
from transformers.testing_utils import CaptureLogger, mockenv, mockenv_context
from transformers.utils.logging import disable_progress_bar, enable_progress_bar
class lowerCamelCase_ (unittest.TestCase ):
'''simple docstring'''
    def test_set_level( self ):
        logger = logging.get_logger()
        # the current default level is logging.WARNING
        level_origin = logging.get_verbosity()
        logging.set_verbosity_error()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_warning()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_info()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        logging.set_verbosity_debug()
        self.assertEqual(logger.getEffectiveLevel() , logging.get_verbosity() )
        # restore to the original level
        logging.set_verbosity(level_origin )
    def test_integration( self ):
        level_origin = logging.get_verbosity()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
        # should be able to log warnings (if default settings weren't overridden by `pytest --log-level-all`)
        if level_origin <= logging.WARNING:
            with CaptureLogger(logger ) as cl:
                logger.warning(msg )
            self.assertEqual(cl.out , msg + "\n" )
        # this is setting the level for all of `transformers.*` loggers
        logging.set_verbosity_error()
        # should not be able to log warnings
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , "" )
        # should be able to log warnings again
        logging.set_verbosity_warning()
        with CaptureLogger(logger ) as cl:
            logger.warning(msg )
        self.assertEqual(cl.out , msg + "\n" )
        # restore to the original level
        logging.set_verbosity(level_origin )
@mockenv(TRANSFORMERS_VERBOSITY="error" )
    def test_env_override( self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        # this action activates the env var
        _ = logging.get_logger("transformers.models.bart.tokenization_bart" )
        env_level_str = os.getenv("TRANSFORMERS_VERBOSITY" , None )
        env_level = logging.log_levels[env_level_str]
        current_level = logging.get_verbosity()
        self.assertEqual(
            env_level , current_level , F"""TRANSFORMERS_VERBOSITY={env_level_str}/{env_level}, but internal verbosity is {current_level}""" , )
        # restore to the original level
        os.environ["TRANSFORMERS_VERBOSITY"] = ""
        transformers.utils.logging._reset_library_root_logger()
@mockenv(TRANSFORMERS_VERBOSITY="super-error" )
    def test_env_invalid_override( self ):
        # reset for the env var to take effect, next time some logger call is made
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.logging.getLogger()
        with CaptureLogger(logger ) as cl:
            # this action activates the env var
            logging.get_logger("transformers.models.bart.tokenization_bart" )
        self.assertIn("Unknown option TRANSFORMERS_VERBOSITY=super-error" , cl.out )
        # no need to restore as nothing was changed
    def test_advisory_warnings( self ):
        # testing `logger.warning_advice()`
        transformers.utils.logging._reset_library_root_logger()
        logger = logging.get_logger("transformers.models.bart.tokenization_bart" )
        msg = "Testing 1, 2, 3"
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="1" ):
            # nothing should be logged as env var disables this method
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , "" )
        with mockenv_context(TRANSFORMERS_NO_ADVISORY_WARNINGS="" ):
            # should log normally as TRANSFORMERS_NO_ADVISORY_WARNINGS is unset
            with CaptureLogger(logger ) as cl:
                logger.warning_advice(msg )
            self.assertEqual(cl.out , msg + "\n" )
def test_set_progress_bar_enabled() -> None:
"""simple docstring"""
disable_progress_bar()
assert are_progress_bars_disabled()
enable_progress_bar()
assert not are_progress_bars_disabled()
| 31 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
IMAGENET_STANDARD_MEAN,
IMAGENET_STANDARD_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
if is_vision_available():
import PIL
lowerCAmelCase :Optional[int] = logging.get_logger(__name__)
class _lowerCamelCase ( BaseImageProcessor ):
'''simple docstring'''
A_ : Tuple = ["""pixel_values"""]
    def __init__( self , do_resize : bool = True , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = PILImageResampling.BILINEAR , do_rescale : bool = True , rescale_factor : Union[int, float] = 1 / 255 , do_normalize : bool = True , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , **kwargs , ) -> None:
        super().__init__(**kwargs )
        size = size if size is not None else {'shortest_edge': 384}
        size = get_size_dict(size , default_to_square=False )
        self.do_resize = do_resize
        self.size = size
        # Default value set here for backwards compatibility where the value in config is None
        self.crop_pct = crop_pct if crop_pct is not None else 224 / 256
        self.resample = resample
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else IMAGENET_STANDARD_MEAN
        self.image_std = image_std if image_std is not None else IMAGENET_STANDARD_STD
    def resize( self , image : np.ndarray , size : Dict[str, int] , crop_pct : float , resample : PILImageResampling = PILImageResampling.BICUBIC , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        size = get_size_dict(size , default_to_square=False )
        if "shortest_edge" not in size:
            raise ValueError(F'Size dictionary must contain \'shortest_edge\' key. Got {size.keys()}' )
        shortest_edge = size['shortest_edge']
        if shortest_edge < 384:
            # maintain same ratio, resizing shortest edge to shortest_edge/crop_pct
            resize_shortest_edge = int(shortest_edge / crop_pct )
            resize_size = get_resize_output_image_size(image , size=resize_shortest_edge , default_to_square=False )
            image = resize(image=image , size=resize_size , resample=resample , data_format=data_format , **kwargs )
            # then crop to (shortest_edge, shortest_edge)
            return center_crop(image=image , size=(shortest_edge, shortest_edge) , data_format=data_format , **kwargs )
        else:
            # warping (no cropping) when evaluated at 384 or larger
            return resize(
                image , size=(shortest_edge, shortest_edge) , resample=resample , data_format=data_format , **kwargs )
    def rescale( self , image : np.ndarray , scale : Union[int, float] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return rescale(image , scale=scale , data_format=data_format , **kwargs )
    def normalize( self , image : np.ndarray , mean : Union[float, List[float]] , std : Union[float, List[float]] , data_format : Optional[Union[str, ChannelDimension]] = None , **kwargs , ) -> np.ndarray:
        return normalize(image , mean=mean , std=std , data_format=data_format , **kwargs )
    def preprocess( self , images : ImageInput , do_resize : bool = None , size : Dict[str, int] = None , crop_pct : float = None , resample : PILImageResampling = None , do_rescale : bool = None , rescale_factor : float = None , do_normalize : bool = None , image_mean : Optional[Union[float, List[float]]] = None , image_std : Optional[Union[float, List[float]]] = None , return_tensors : Optional[Union[str, TensorType]] = None , data_format : ChannelDimension = ChannelDimension.FIRST , **kwargs , ) -> PIL.Image.Image:
        do_resize = do_resize if do_resize is not None else self.do_resize
        crop_pct = crop_pct if crop_pct is not None else self.crop_pct
        resample = resample if resample is not None else self.resample
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        size = size if size is not None else self.size
        size = get_size_dict(size , default_to_square=False )
        images = make_list_of_images(images )
        if not valid_images(images ):
            raise ValueError(
                'Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, '
                'torch.Tensor, tf.Tensor or jax.ndarray.' )
        if do_resize and size is None or resample is None:
            raise ValueError('Size and resample must be specified if do_resize is True.' )
        if do_resize and size["shortest_edge"] < 384 and crop_pct is None:
            raise ValueError('crop_pct must be specified if size < 384.' )
        if do_rescale and rescale_factor is None:
            raise ValueError('Rescale factor must be specified if do_rescale is True.' )
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError('Image mean and std must be specified if do_normalize is True.' )
        # All transformations expect numpy arrays.
        images = [to_numpy_array(image ) for image in images]
        if do_resize:
            images = [self.resize(image=image , size=size , crop_pct=crop_pct , resample=resample ) for image in images]
        if do_rescale:
            images = [self.rescale(image=image , scale=rescale_factor ) for image in images]
        if do_normalize:
            images = [self.normalize(image=image , mean=image_mean , std=image_std ) for image in images]
        images = [to_channel_dimension_format(image , data_format ) for image in images]
        data = {'pixel_values': images}
        return BatchFeature(data=data , tensor_type=return_tensors ) | 331 | 0 |
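# --- Added usage sketch (hedged; assumes `transformers` and its vision extras
# are installed). Runs the image processor above on a random HWC uint8 array;
# the names `processor_demo` and `dummy_image` are illustrative.
processor_demo = _lowerCamelCase(size={'shortest_edge': 384} )
dummy_image = (np.random.rand(500 , 400 , 3 ) * 255).astype('uint8' )
batch = processor_demo.preprocess(images=dummy_image , return_tensors='np' )
print(batch['pixel_values'].shape )  # expected: (1, 3, 384, 384)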
import inspect
import unittest
from transformers import MobileNetVaConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation, MobileNetVaModel
from transformers.models.mobilenet_va.modeling_mobilenet_va import MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import MobileNetVaImageProcessor
class MobileNetVaConfigTester ( ConfigTester ):
    def create_and_test_config_common_properties( self ):
        config = self.config_class(**self.inputs_dict )
        self.parent.assertTrue(hasattr(config , 'tf_padding' ) )
        self.parent.assertTrue(hasattr(config , 'depth_multiplier' ) )
class MobileNetVaModelTester:
    def __init__( self , parent , batch_size=1_3 , num_channels=3 , image_size=3_2 , depth_multiplier=0.25 , depth_divisible_by=8 , min_depth=8 , expand_ratio=6 , output_stride=3_2 , first_layer_is_expansion=True , finegrained_output=True , tf_padding=True , hidden_act="relu6" , last_hidden_size=1_2_8_0 , classifier_dropout_prob=0.1 , initializer_range=0.02 , is_training=True , use_labels=True , num_labels=1_0 , scope=None , ):
        self.parent = parent
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.depth_multiplier = depth_multiplier
        self.depth_divisible_by = depth_divisible_by
        self.min_depth = min_depth
        self.expand_ratio = expand_ratio
        self.tf_padding = tf_padding
        self.output_stride = output_stride
        self.first_layer_is_expansion = first_layer_is_expansion
        self.finegrained_output = finegrained_output
        self.hidden_act = hidden_act
        self.last_hidden_size = last_hidden_size if finegrained_output else int(last_hidden_size * depth_multiplier )
        self.classifier_dropout_prob = classifier_dropout_prob
        self.use_labels = use_labels
        self.is_training = is_training
        self.num_labels = num_labels
        self.initializer_range = initializer_range
        self.scope = scope
    def prepare_config_and_inputs( self ):
        pixel_values = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
        labels = None
        pixel_labels = None
        if self.use_labels:
            labels = ids_tensor([self.batch_size] , self.num_labels )
            pixel_labels = ids_tensor([self.batch_size, self.image_size, self.image_size] , self.num_labels )
        config = self.get_config()
        return config, pixel_values, labels, pixel_labels
    def get_config( self ):
return MobileNetVaConfig(
num_channels=self.num_channels , image_size=self.image_size , depth_multiplier=self.depth_multiplier , depth_divisible_by=self.depth_divisible_by , min_depth=self.min_depth , expand_ratio=self.expand_ratio , output_stride=self.output_stride , first_layer_is_expansion=self.first_layer_is_expansion , finegrained_output=self.finegrained_output , hidden_act=self.hidden_act , tf_padding=self.tf_padding , classifier_dropout_prob=self.classifier_dropout_prob , initializer_range=self.initializer_range , )
    def create_and_check_model( self , config , pixel_values , labels , pixel_labels ):
        model = MobileNetVaModel(config=config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.last_hidden_state.shape , (
self.batch_size,
self.last_hidden_size,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
self.parent.assertEqual(
result.pooler_output.shape , (self.batch_size, self.last_hidden_size) , )
    def create_and_check_for_image_classification( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForImageClassification(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values , labels=labels )
        self.parent.assertEqual(result.logits.shape , (self.batch_size, self.num_labels) )
    def create_and_check_for_semantic_segmentation( self , config , pixel_values , labels , pixel_labels ):
        config.num_labels = self.num_labels
        model = MobileNetVaForSemanticSegmentation(config )
        model.to(torch_device )
        model.eval()
        result = model(pixel_values )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
        result = model(pixel_values , labels=pixel_labels )
self.parent.assertEqual(
result.logits.shape , (
self.batch_size,
self.num_labels,
self.image_size // self.output_stride,
self.image_size // self.output_stride,
) , )
    def prepare_config_and_inputs_for_common( self ):
        config_and_inputs = self.prepare_config_and_inputs()
        config, pixel_values, labels, pixel_labels = config_and_inputs
        inputs_dict = {'pixel_values': pixel_values}
        return config, inputs_dict
@require_torch
class MobileNetVaModelTest ( ModelTesterMixin , PipelineTesterMixin , unittest.TestCase ):
    all_model_classes = (
(MobileNetVaModel, MobileNetVaForImageClassification, MobileNetVaForSemanticSegmentation)
if is_torch_available()
else ()
)
    pipeline_model_mapping = (
{
'''feature-extraction''': MobileNetVaModel,
'''image-classification''': MobileNetVaForImageClassification,
'''image-segmentation''': MobileNetVaForSemanticSegmentation,
}
if is_torch_available()
else {}
)
    test_pruning = False
    test_resize_embeddings = False
    test_head_masking = False
    has_attentions = False
    def setUp( self ):
        self.model_tester = MobileNetVaModelTester(self )
        self.config_tester = MobileNetVaConfigTester(self , config_class=MobileNetVaConfig , has_text_modality=False )
    def test_config( self ):
self.config_tester.run_common_tests()
@unittest.skip(reason='MobileNetV2 does not use inputs_embeds' )
    def test_inputs_embeds( self ):
pass
@unittest.skip(reason='MobileNetV2 does not support input and output embeddings' )
    def test_model_common_attributes( self ):
pass
@unittest.skip(reason='MobileNetV2 does not output attentions' )
    def test_attention_outputs( self ):
pass
    def test_forward_signature( self ):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            model = model_class(config )
            signature = inspect.signature(model.forward )
            # signature.parameters is an OrderedDict => so arg_names order is deterministic
            arg_names = [*signature.parameters.keys()]
            expected_arg_names = ['pixel_values']
            self.assertListEqual(arg_names[:1] , expected_arg_names )
    def test_model( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs )
    def test_hidden_states_output( self ):
        def check_hidden_states_output(inputs_dict , config , model_class ):
            model = model_class(config )
            model.to(torch_device )
            model.eval()
            with torch.no_grad():
                outputs = model(**self._prepare_for_class(inputs_dict , model_class ) )
            hidden_states = outputs.hidden_states
            expected_num_stages = 1_6
            self.assertEqual(len(hidden_states ) , expected_num_stages )
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        for model_class in self.all_model_classes:
            inputs_dict['output_hidden_states'] = True
            check_hidden_states_output(inputs_dict , config , model_class )
            # check that output_hidden_states also work using config
            del inputs_dict["output_hidden_states"]
            config.output_hidden_states = True
            check_hidden_states_output(inputs_dict , config , model_class )
    def test_for_image_classification( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_image_classification(*config_and_inputs )
    def test_for_semantic_segmentation( self ):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_semantic_segmentation(*config_and_inputs )
    @slow
    def test_model_from_pretrained( self ):
        for model_name in MOBILENET_V2_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
            model = MobileNetVaModel.from_pretrained(model_name )
            self.assertIsNotNone(model )
def prepare_img():
    """simple docstring"""
    image = Image.open('./tests/fixtures/tests_samples/COCO/000000039769.png' )
    return image
@require_torch
@require_vision
class MobileNetVaModelIntegrationTest ( unittest.TestCase ):
    @cached_property
    def default_image_processor( self ):
        return (
            MobileNetVaImageProcessor.from_pretrained('google/mobilenet_v2_1.0_224' ) if is_vision_available() else None
        )
    @slow
    def test_inference_image_classification_head( self ):
        model = MobileNetVaForImageClassification.from_pretrained('google/mobilenet_v2_1.0_224' ).to(torch_device )
        image_processor = self.default_image_processor
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        # verify the logits
        expected_shape = torch.Size((1, 1_0_0_1) )
        self.assertEqual(outputs.logits.shape , expected_shape )
        expected_slice = torch.tensor([0.2445, -1.1993, 0.1905] ).to(torch_device )
        self.assertTrue(torch.allclose(outputs.logits[0, :3] , expected_slice , atol=1E-4 ) )
    @slow
    def test_inference_semantic_segmentation( self ):
        model = MobileNetVaForSemanticSegmentation.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        model = model.to(torch_device )
        image_processor = MobileNetVaImageProcessor.from_pretrained('google/deeplabv3_mobilenet_v2_1.0_513' )
        image = prepare_img()
        inputs = image_processor(images=image , return_tensors='pt' ).to(torch_device )
        # forward pass
        with torch.no_grad():
            outputs = model(**inputs )
        logits = outputs.logits
        # verify the logits
        expected_shape = torch.Size((1, 2_1, 6_5, 6_5) )
        self.assertEqual(logits.shape , expected_shape )
        expected_slice = torch.tensor(
            [
                [[17.5790, 17.7581, 18.3355], [18.3257, 18.4230, 18.8973], [18.6169, 18.8650, 19.2187]],
                [[-2.1595, -2.0977, -2.3741], [-2.4226, -2.3028, -2.6835], [-2.7819, -2.5991, -2.7706]],
                [[4.2058, 4.8317, 4.7638], [4.4136, 5.0361, 4.9383], [4.5028, 4.9644, 4.8734]],
            ] , device=torch_device , )
        self.assertTrue(torch.allclose(logits[0, :3, :3, :3] , expected_slice , atol=1E-4 ) )
| 32 |
'''simple docstring'''
from __future__ import annotations
from math import pi
# Define the Reduced Planck Constant ℏ (H bar), speed of light C, value of
# Pi and the function
REDUCED_PLANCK_CONSTANT = 1.0_5_4_5_7_1_8_1_7E-3_4 # unit of ℏ : J * s
SPEED_OF_LIGHT = 3E8 # unit of c : m * s^-1
def casimir_force( force : float , area : float , distance : float ):
"""simple docstring"""
if (force, area, distance).count(0 ) != 1:
raise ValueError('One and only one argument must be 0' )
if force < 0:
raise ValueError('Magnitude of force can not be negative' )
if distance < 0:
raise ValueError('Distance can not be negative' )
if area < 0:
raise ValueError('Area can not be negative' )
    if force == 0:
        force = (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (
            240 * (distance) ** 4
        )
        return {"force": force}
    elif area == 0:
        area = (240 * force * (distance) ** 4) / (
            REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2
        )
        return {"area": area}
    elif distance == 0:
        distance = (
            (REDUCED_PLANCK_CONSTANT * SPEED_OF_LIGHT * pi**2 * area) / (240 * force)
        ) ** (1 / 4)
        return {"distance": distance}
    raise ValueError('One and only one argument must be 0' )
# Run doctest
if __name__ == "__main__":
import doctest
    doctest.testmod() | 331 | 0 |
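# --- Added usage sketch for `casimir_force` above (the plate area and
# separation values are illustrative): two idealized plates of area 4 cm^2
# held 1 micrometre apart, solving for the attractive force.
print(casimir_force(force=0 , area=4E-4 , distance=1E-6 ))  # -> {'force': ...}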
"""simple docstring"""
import argparse
import fairseq
import torch
from transformers import UniSpeechSatConfig, UniSpeechSatForCTC, UniSpeechSatForPreTraining, logging
logging.set_verbosity_info()
logger = logging.get_logger(__name__)
MAPPING = {
'''post_extract_proj''': '''feature_projection.projection''',
'''encoder.pos_conv.0''': '''encoder.pos_conv_embed.conv''',
'''self_attn.k_proj''': '''encoder.layers.*.attention.k_proj''',
'''self_attn.v_proj''': '''encoder.layers.*.attention.v_proj''',
'''self_attn.q_proj''': '''encoder.layers.*.attention.q_proj''',
'''self_attn.out_proj''': '''encoder.layers.*.attention.out_proj''',
'''self_attn_layer_norm''': '''encoder.layers.*.layer_norm''',
'''fc1''': '''encoder.layers.*.feed_forward.intermediate_dense''',
'''fc2''': '''encoder.layers.*.feed_forward.output_dense''',
'''final_layer_norm''': '''encoder.layers.*.final_layer_norm''',
'''encoder.layer_norm''': '''encoder.layer_norm''',
'''encoder.layer_norm_for_extract''': '''layer_norm_for_extract''',
'''w2v_model.layer_norm''': '''feature_projection.layer_norm''',
'''quantizer.weight_proj''': '''quantizer.weight_proj''',
'''quantizer.vars''': '''quantizer.codevectors''',
'''project_q''': '''project_q''',
'''final_proj''': '''project_hid''',
'''w2v_encoder.proj''': '''lm_head''',
'''label_embs_concat''': '''label_embeddings_concat''',
'''mask_emb''': '''masked_spec_embed''',
'''spk_proj''': '''speaker_proj''',
}
TOP_LEVEL_KEYS = [
'''lm_head''',
'''quantizer.weight_proj''',
'''quantizer.codevectors''',
'''project_q''',
'''project_hid''',
'''label_embeddings_concat''',
'''speaker_proj''',
'''layer_norm_for_extract''',
]
def set_recursively(hf_pointer , key , value , full_name , weight_type ):
    for attribute in key.split('''.''' ):
        hf_pointer = getattr(hf_pointer , attribute )
    if weight_type is not None:
        hf_shape = getattr(hf_pointer , weight_type ).shape
    else:
        hf_shape = hf_pointer.shape
    if hf_shape != value.shape:
        raise ValueError(
            F'''Shape of hf {key + '.' + weight_type if weight_type is not None else ''} is {hf_shape}, but should be'''
            F''' {value.shape} for {full_name}''' )
    if weight_type == "weight":
        hf_pointer.weight.data = value
    elif weight_type == "weight_g":
        hf_pointer.weight_g.data = value
    elif weight_type == "weight_v":
        hf_pointer.weight_v.data = value
    elif weight_type == "bias":
        hf_pointer.bias.data = value
    else:
        hf_pointer.data = value
    logger.info(F'''{key + '.' + weight_type if weight_type is not None else ''} was initialized from {full_name}.''' )
def recursively_load_weights(fairseq_model , hf_model ):
    unused_weights = []
    fairseq_dict = fairseq_model.state_dict()
    feature_extractor = hf_model.unispeech_sat.feature_extractor
    for name, value in fairseq_dict.items():
        is_used = False
        if "conv_layers" in name:
            load_conv_layer(
                name , value , feature_extractor , unused_weights , hf_model.config.feat_extract_norm == '''group''' , )
            is_used = True
        else:
            for key, mapped_key in MAPPING.items():
                mapped_key = '''unispeech_sat.''' + mapped_key if mapped_key not in TOP_LEVEL_KEYS else mapped_key
                if key in name or key.split('''w2v_model.''' )[-1] == name.split('''.''' )[0]:
                    if "layer_norm_for_extract" in name and (".".join(name.split('''.''' )[:-1] ) != key):
                        # special case since naming is very similar
                        continue
                    is_used = True
                    if "*" in mapped_key:
                        layer_index = name.split(key )[0].split('''.''' )[-2]
                        mapped_key = mapped_key.replace('''*''' , layer_index )
                    if "weight_g" in name:
                        weight_type = '''weight_g'''
                    elif "weight_v" in name:
                        weight_type = '''weight_v'''
                    elif "bias" in name:
                        weight_type = '''bias'''
                    elif "weight" in name:
                        # TODO: don't match quantizer.weight_proj
                        weight_type = '''weight'''
                    else:
                        weight_type = None
                    set_recursively(hf_model , mapped_key , value , name , weight_type )
                continue
        if not is_used:
            unused_weights.append(name )
    logger.warning(F'''Unused weights: {unused_weights}''' )
def load_conv_layer(full_name , value , feature_extractor , unused_weights , use_group_norm ):
    name = full_name.split('''conv_layers.''' )[-1]
    items = name.split('''.''' )
    layer_id = int(items[0] )
    type_id = int(items[1] )
    if type_id == 0:
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.bias.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].conv.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor.conv_layers[layer_id].conv.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].conv.weight.data = value
            logger.info(F'''Feat extract conv layer {layer_id} was initialized from {full_name}.''' )
    elif (type_id == 2 and not use_group_norm) or (type_id == 2 and layer_id == 0 and use_group_norm):
        if "bias" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.bias.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor[layer_id].layer_norm.bias.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.bias.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
        elif "weight" in name:
            if value.shape != feature_extractor.conv_layers[layer_id].layer_norm.weight.data.shape:
                raise ValueError(
                    F'''{full_name} has size {value.shape}, but'''
                    F''' {feature_extractor[layer_id].layer_norm.weight.data.shape} was found.''' )
            feature_extractor.conv_layers[layer_id].layer_norm.weight.data = value
            logger.info(F'''Feat extract layer norm weight of layer {layer_id} was initialized from {full_name}.''' )
    else:
        unused_weights.append(full_name )
@torch.no_grad()
def convert_unispeech_sat_checkpoint(checkpoint_path , pytorch_dump_folder_path , config_path=None , dict_path=None , is_finetuned=True ):
    if config_path is not None:
        config = UniSpeechSatConfig.from_pretrained(config_path )
    else:
        config = UniSpeechSatConfig()
    dict_path = ''''''
    if is_finetuned:
        hf_wavavec = UniSpeechSatForCTC(config )
    else:
        hf_wavavec = UniSpeechSatForPreTraining(config )
    model, _, _ = fairseq.checkpoint_utils.load_model_ensemble_and_task(
        [checkpoint_path] , arg_overrides={'''data''': '''/'''.join(dict_path.split('''/''' )[:-1] )} )
    model = model[0].eval()
    recursively_load_weights(model , hf_wavavec )
    hf_wavavec.save_pretrained(pytorch_dump_folder_path )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument('''--pytorch_dump_folder_path''', default=None, type=str, help='''Path to the output PyTorch model.''')
parser.add_argument('''--checkpoint_path''', default=None, type=str, help='''Path to fairseq checkpoint''')
parser.add_argument('''--dict_path''', default=None, type=str, help='''Path to dict of fine-tuned model''')
parser.add_argument('''--config_path''', default=None, type=str, help='''Path to hf config.json of model to convert''')
parser.add_argument(
'''--not_finetuned''', action='''store_true''', help='''Whether the model to convert is a fine-tuned model or not'''
)
    args = parser.parse_args()
convert_unispeech_sat_checkpoint(
args.checkpoint_path, args.pytorch_dump_folder_path, args.config_path, args.dict_path, not args.not_finetuned
)
| 33 |
'''simple docstring'''
import os
from itertools import chain
from random import randrange, shuffle
import pytest
from .sol1 import PokerHand
SORTED_HANDS = (
'''4S 3H 2C 7S 5H''',
'''9D 8H 2C 6S 7H''',
'''2D 6D 9D TH 7D''',
'''TC 8C 2S JH 6C''',
'''JH 8S TH AH QH''',
'''TS KS 5S 9S AC''',
'''KD 6S 9D TH AD''',
'''KS 8D 4D 9S 4S''', # pair
'''8C 4S KH JS 4D''', # pair
'''QH 8H KD JH 8S''', # pair
'''KC 4H KS 2H 8D''', # pair
'''KD 4S KC 3H 8S''', # pair
'''AH 8S AS KC JH''', # pair
'''3H 4C 4H 3S 2H''', # 2 pairs
'''5S 5D 2C KH KH''', # 2 pairs
'''3C KH 5D 5S KH''', # 2 pairs
'''AS 3C KH AD KH''', # 2 pairs
'''7C 7S 3S 7H 5S''', # 3 of a kind
'''7C 7S KH 2H 7H''', # 3 of a kind
'''AC KH QH AH AS''', # 3 of a kind
'''2H 4D 3C AS 5S''', # straight (low ace)
'''3C 5C 4C 2C 6H''', # straight
'''6S 8S 7S 5H 9H''', # straight
'''JS QS 9H TS KH''', # straight
'''QC KH TS JS AH''', # straight (high ace)
'''8C 9C 5C 3C TC''', # flush
'''3S 8S 9S 5S KS''', # flush
'''4C 5C 9C 8C KC''', # flush
'''JH 8H AH KH QH''', # flush
'''3D 2H 3H 2C 2D''', # full house
'''2H 2C 3S 3H 3D''', # full house
'''KH KC 3S 3H 3D''', # full house
'''JC 6H JS JD JH''', # 4 of a kind
'''JC 7H JS JD JH''', # 4 of a kind
'''JC KH JS JD JH''', # 4 of a kind
'''2S AS 4S 5S 3S''', # straight flush (low ace)
'''2D 6D 3D 4D 5D''', # straight flush
'''5C 6C 3C 7C 4C''', # straight flush
'''JH 9H TH KH QH''', # straight flush
'''JH AH TH KH QH''', # royal flush (high ace straight flush)
)
TEST_COMPARE = (
('''2H 3H 4H 5H 6H''', '''KS AS TS QS JS''', '''Loss'''),
('''2H 3H 4H 5H 6H''', '''AS AD AC AH JD''', '''Win'''),
('''AS AH 2H AD AC''', '''JS JD JC JH 3D''', '''Win'''),
('''2S AH 2H AS AC''', '''JS JD JC JH AD''', '''Loss'''),
('''2S AH 2H AS AC''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''AS 3S 4S 8S 2S''', '''2H 3H 5H 6H 7H''', '''Win'''),
('''2H 3H 5H 6H 7H''', '''2S 3H 4H 5S 6C''', '''Win'''),
('''2S 3H 4H 5S 6C''', '''3D 4C 5H 6H 2S''', '''Tie'''),
('''2S 3H 4H 5S 6C''', '''AH AC 5H 6H AS''', '''Win'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H AS''', '''Loss'''),
('''2S 2H 4H 5S 4C''', '''AH AC 5H 6H 7S''', '''Win'''),
('''6S AD 7H 4S AS''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S AH 4H 5S KC''', '''AH AC 5H 6H 7S''', '''Loss'''),
('''2S 3H 6H 7S 9C''', '''7H 3C TH 6H 9S''', '''Loss'''),
('''4S 5H 6H TS AC''', '''3S 5H 6H TS AC''', '''Win'''),
('''2S AH 4H 5S 6C''', '''AD 4C 5H 6H 2C''', '''Tie'''),
('''AS AH 3H AD AC''', '''AS AH 2H AD AC''', '''Win'''),
('''AH AC 5H 5C QS''', '''AH AC 5H 5C KS''', '''Loss'''),
('''AH AC 5H 5C QS''', '''KH KC 5H 5C QS''', '''Win'''),
('''7C 7S KH 2H 7H''', '''3C 3S AH 2H 3H''', '''Win'''),
('''3C 3S AH 2H 3H''', '''7C 7S KH 2H 7H''', '''Loss'''),
('''6H 5H 4H 3H 2H''', '''5H 4H 3H 2H AH''', '''Win'''),
('''5H 4H 3H 2H AH''', '''5H 4H 3H 2H AH''', '''Tie'''),
('''5H 4H 3H 2H AH''', '''6H 5H 4H 3H 2H''', '''Loss'''),
('''AH AD KS KC AC''', '''AH KD KH AC KC''', '''Win'''),
('''2H 4D 3C AS 5S''', '''2H 4D 3C 6S 5S''', '''Loss'''),
('''2H 3S 3C 3H 2S''', '''3S 3C 2S 2H 2D''', '''Win'''),
('''4D 6D 5D 2D JH''', '''3S 8S 3H TC KH''', '''Loss'''),
('''4S 6C 8S 3S 7S''', '''AD KS 2D 7D 7C''', '''Loss'''),
('''6S 4C 7H 8C 3H''', '''5H JC AH 9D 9C''', '''Loss'''),
('''9D 9H JH TC QH''', '''3C 2S JS 5C 7H''', '''Win'''),
('''2H TC 8S AD 9S''', '''4H TS 7H 2C 5C''', '''Win'''),
('''9D 3S 2C 7S 7C''', '''JC TD 3C TC 9H''', '''Loss'''),
)
TEST_FLUSH = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', True),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', False),
('''AS 3S 4S 8S 2S''', True),
)
TEST_STRAIGHT = (
('''2H 3H 4H 5H 6H''', True),
('''AS AH 2H AD AC''', False),
('''2H 3H 5H 6H 7H''', False),
('''KS AS TS QS JS''', True),
('''8H 9H QS JS TH''', True),
)
TEST_FIVE_HIGH_STRAIGHT = (
('''2H 4D 3C AS 5S''', True, [5, 4, 3, 2, 1_4]),
('''2H 5D 3C AS 5S''', False, [1_4, 5, 5, 3, 2]),
('''JH QD KC AS TS''', False, [1_4, 1_3, 1_2, 1_1, 1_0]),
('''9D 3S 2C 7S 7C''', False, [9, 7, 7, 3, 2]),
)
TEST_KIND = (
('''JH AH TH KH QH''', 0),
('''JH 9H TH KH QH''', 0),
('''JC KH JS JD JH''', 7),
('''KH KC 3S 3H 3D''', 6),
('''8C 9C 5C 3C TC''', 0),
('''JS QS 9H TS KH''', 0),
('''7C 7S KH 2H 7H''', 3),
('''3C KH 5D 5S KH''', 2),
('''QH 8H KD JH 8S''', 1),
('''2D 6D 9D TH 7D''', 0),
)
TEST_TYPES = (
('''JH AH TH KH QH''', 2_3),
('''JH 9H TH KH QH''', 2_2),
('''JC KH JS JD JH''', 2_1),
('''KH KC 3S 3H 3D''', 2_0),
('''8C 9C 5C 3C TC''', 1_9),
('''JS QS 9H TS KH''', 1_8),
('''7C 7S KH 2H 7H''', 1_7),
('''3C KH 5D 5S KH''', 1_6),
('''QH 8H KD JH 8S''', 1_5),
('''2D 6D 9D TH 7D''', 1_4),
)
def generate_random_hand():
    """simple docstring"""
    play, oppo = randrange(len(SORTED_HANDS ) ), randrange(len(SORTED_HANDS ) )
    expected = ['Loss', 'Tie', 'Win'][(play >= oppo) + (play > oppo)]
    hand, other = SORTED_HANDS[play], SORTED_HANDS[oppo]
    return hand, other, expected
def generate_random_hands(number_of_hands: int = 100 ):
    """simple docstring"""
    return (generate_random_hand() for _ in range(number_of_hands ))
@pytest.mark.parametrize('hand, expected' , TEST_FLUSH )
def test_hand_is_flush(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_flush() == expected
@pytest.mark.parametrize('hand, expected' , TEST_STRAIGHT )
def test_hand_is_straight(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_straight() == expected
@pytest.mark.parametrize('hand, expected, card_values' , TEST_FIVE_HIGH_STRAIGHT )
def test_hand_is_five_high_straight(hand , expected , card_values ):
    """simple docstring"""
    player = PokerHand(hand )
    assert player._is_five_high_straight() == expected
    assert player._card_values == card_values
@pytest.mark.parametrize('hand, expected' , TEST_KIND )
def test_hand_is_same_kind(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._is_same_kind() == expected
@pytest.mark.parametrize('hand, expected' , TEST_TYPES )
def test_hand_values(hand , expected ):
    """simple docstring"""
    assert PokerHand(hand )._hand_type == expected
@pytest.mark.parametrize('hand, other, expected' , TEST_COMPARE )
def test_compare_simple(hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
@pytest.mark.parametrize('hand, other, expected' , generate_random_hands() )
def test_compare_random(hand , other , expected ):
    """simple docstring"""
    assert PokerHand(hand ).compare_with(PokerHand(other ) ) == expected
def test_hand_sorted():
    """simple docstring"""
    poker_hands = [PokerHand(hand ) for hand in SORTED_HANDS]
    list_copy = poker_hands.copy()
    shuffle(list_copy )
    user_sorted = chain(sorted(list_copy ) )
    for index, hand in enumerate(user_sorted ):
        assert hand == poker_hands[index]
def test_custom_sort_five_high_straight():
    """simple docstring"""
    pokerhands = [PokerHand('2D AC 3H 4H 5S' ), PokerHand('2S 3H 4H 5S 6C' )]
    pokerhands.sort(reverse=True )
    assert pokerhands[0].__str__() == "2S 3H 4H 5S 6C"
def test_multiple_calls_five_high_straight():
    """simple docstring"""
    pokerhand = PokerHand('2C 4S AS 3D 5C' )
    expected = True
    expected_card_values = [5, 4, 3, 2, 14]
    for _ in range(10 ):
        assert pokerhand._is_five_high_straight() == expected
        assert pokerhand._card_values == expected_card_values
def test_euler_project():
    """simple docstring"""
    answer = 0
    script_dir = os.path.abspath(os.path.dirname(__file__ ) )
    poker_hands_path = os.path.join(script_dir , 'poker_hands.txt' )
    with open(poker_hands_path ) as file_hand:
        for line in file_hand:
            player_hand = line[:14].strip()
            opponent_hand = line[15:].strip()
            player, opponent = PokerHand(player_hand ), PokerHand(opponent_hand )
            output = player.compare_with(opponent )
            if output == "Win":
                answer += 1
    assert answer == 376 | 331 | 0 |
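# --- Added usage sketch (hedged): how the PokerHand API exercised above is
# meant to be used, with a pair of hands taken from TEST_COMPARE. The helper
# name `_demo_compare` is illustrative.
def _demo_compare(pokerhand_cls=PokerHand ):
    hand , other = pokerhand_cls('2H 3H 4H 5H 6H' ) , pokerhand_cls('KS AS TS QS JS' )
    return hand.compare_with(other )  # expected: 'Loss' (straight vs. royal flush)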
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
import torch
from huggingface_hub import cached_download, hf_hub_url
from transformers import AutoImageProcessor, CvtConfig, CvtForImageClassification
def embeddings (idx: int ):
    embed = []
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.weight",
F"stage{idx}.patch_embed.proj.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.projection.bias",
F"stage{idx}.patch_embed.proj.bias",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.weight",
F"stage{idx}.patch_embed.norm.weight",
) )
embed.append(
(
F"cvt.encoder.stages.{idx}.embedding.convolution_embeddings.normalization.bias",
F"stage{idx}.patch_embed.norm.bias",
) )
return embed
def attention (idx: int , cnt: int ):
    attention_weights = []
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_query.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_q.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_key.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_k.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.convolution.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.conv.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.weight",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.bias",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_mean",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_mean",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.running_var",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.running_var",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.convolution_projection_value.convolution_projection.normalization.num_batches_tracked",
F"stage{idx}.blocks.{cnt}.attn.conv_proj_v.bn.num_batches_tracked",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_q.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_query.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_q.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_k.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_key.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_k.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.weight",
F"stage{idx}.blocks.{cnt}.attn.proj_v.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.attention.projection_value.bias",
F"stage{idx}.blocks.{cnt}.attn.proj_v.bias",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.weight",
F"stage{idx}.blocks.{cnt}.attn.proj.weight",
) )
attention_weights.append(
(
F"cvt.encoder.stages.{idx}.layers.{cnt}.attention.output.dense.bias",
F"stage{idx}.blocks.{cnt}.attn.proj.bias",
) )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.intermediate.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.weight", F"stage{idx}.blocks.{cnt}.mlp.fc2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.output.dense.bias", F"stage{idx}.blocks.{cnt}.mlp.fc2.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.weight", F"stage{idx}.blocks.{cnt}.norm1.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_before.bias", F"stage{idx}.blocks.{cnt}.norm1.bias") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.weight", F"stage{idx}.blocks.{cnt}.norm2.weight") )
attention_weights.append(
(F"cvt.encoder.stages.{idx}.layers.{cnt}.layernorm_after.bias", F"stage{idx}.blocks.{cnt}.norm2.bias") )
return attention_weights
def cls_token(idx):
    """Helper to rename the cls_token weights (only stage 2 of CvT has a cls token)."""
    token = []
    token.append((F"cvt.encoder.stages.{idx}.cls_token", 'stage2.cls_token') )
    return token
def final():
    """Helper to rename the final layernorm and classification head weights."""
    head = []
    head.append(('layernorm.weight', 'norm.weight') )
    head.append(('layernorm.bias', 'norm.bias') )
    head.append(('classifier.weight', 'head.weight') )
    head.append(('classifier.bias', 'head.bias') )
    return head
def convert_cvt_checkpoint(cvt_model, image_size, cvt_file_name, pytorch_dump_folder_path):
    """Convert a Microsoft CvT checkpoint to the HuggingFace format."""
    img_labels_file = 'imagenet-1k-id2label.json'
    num_labels = 1000
    repo_id = 'huggingface/label-files'
    id2label = json.load(open(cached_download(hf_hub_url(repo_id, img_labels_file, repo_type='dataset')), 'r'))
    id2label = {int(k): v for k, v in id2label.items()}
    label2id = {v: k for k, v in id2label.items()}
    config = CvtConfig(num_labels=num_labels, id2label=id2label, label2id=label2id)

    # For depth size 13 (13 = 1+2+10)
    if cvt_model.rsplit('/', 1)[-1][4:6] == "13":
        config.depth = [1, 2, 10]
    # For depth size 21 (21 = 1+4+16)
    elif cvt_model.rsplit('/', 1)[-1][4:6] == "21":
        config.depth = [1, 4, 16]
    # For wide cvt (similar to wide-resnet) depth size 24 (w24 = 2+2+20)
    else:
        config.depth = [2, 2, 20]
        config.num_heads = [3, 12, 16]
        config.embed_dim = [192, 768, 1024]

    model = CvtForImageClassification(config)
    image_processor = AutoImageProcessor.from_pretrained('facebook/convnext-base-224-22k-1k')
    image_processor.size['shortest_edge'] = image_size
    original_weights = torch.load(cvt_file_name, map_location=torch.device('cpu'))

    huggingface_weights = OrderedDict()
    list_of_state_dict = []

    for idx in range(len(config.depth)):
        if config.cls_token[idx]:
            list_of_state_dict = list_of_state_dict + cls_token(idx)
        list_of_state_dict = list_of_state_dict + embeddings(idx)
        for cnt in range(config.depth[idx]):
            list_of_state_dict = list_of_state_dict + attention(idx, cnt)
    list_of_state_dict = list_of_state_dict + final()

    for gg in list_of_state_dict:
        print(gg)
    for i in range(len(list_of_state_dict)):
        huggingface_weights[list_of_state_dict[i][0]] = original_weights[list_of_state_dict[i][1]]

    model.load_state_dict(huggingface_weights)
    model.save_pretrained(pytorch_dump_folder_path)
    image_processor.save_pretrained(pytorch_dump_folder_path)
# Download the weights from zoo: https://1drv.ms/u/s!AhIXJn_J-blW9RzF3rMW7SsLHa8h?e=blQ0Al
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
parser.add_argument(
'--cvt_model',
default='cvt-w24',
type=str,
help='Name of the cvt model you\'d like to convert.',
)
parser.add_argument(
'--image_size',
        default=384,
type=int,
help='Input Image Size',
)
    parser.add_argument(
        '--cvt_file_name',
        default=r'cvtmodels\CvT-w24-384x384-IN-22k.pth',
        type=str,
        help='Path to the original CvT checkpoint file.',
    )
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
    args = parser.parse_args()
convert_cvt_checkpoint(args.cvt_model, args.image_size, args.cvt_file_name, args.pytorch_dump_folder_path)
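# Example invocation (a sketch: the script/checkpoint file names below are hypothetical, and the
# checkpoint must first be downloaded from the zoo link above):
#   python convert_cvt_checkpoint.py --cvt_model cvt-13 --image_size 384 \
#       --cvt_file_name ./CvT-13-384x384-IN-1k.pth --pytorch_dump_folder_path ./cvt-13-converted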
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_tf_available,
is_torch_available,
)
_import_structure = {
    'configuration_vision_encoder_decoder': ['VisionEncoderDecoderConfig', 'VisionEncoderDecoderOnnxConfig']
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_vision_encoder_decoder'] = ['VisionEncoderDecoderModel']
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_vision_encoder_decoder'] = ['TFVisionEncoderDecoderModel']
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_flax_vision_encoder_decoder'] = ['FlaxVisionEncoderDecoderModel']
if TYPE_CHECKING:
from .configuration_vision_encoder_decoder import VisionEncoderDecoderConfig, VisionEncoderDecoderOnnxConfig
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_vision_encoder_decoder import VisionEncoderDecoderModel
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_vision_encoder_decoder import TFVisionEncoderDecoderModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_vision_encoder_decoder import FlaxVisionEncoderDecoderModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
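# Usage note (a sketch): with the _LazyModule pattern above, heavy submodules are imported only on
# first attribute access, so e.g.
#   from transformers.models.vision_encoder_decoder import VisionEncoderDecoderConfig
# succeeds even in environments where torch, TensorFlow, or Flax are not installed.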
'''simple docstring'''
from typing import List
import datasets
from datasets.tasks import AudioClassification
from ..folder_based_builder import folder_based_builder
logger = datasets.utils.logging.get_logger(__name__)
class AudioFolderConfig(folder_based_builder.FolderBasedBuilderConfig):
    """Builder Config for AudioFolder."""

    drop_labels: bool = None
    drop_metadata: bool = None


class AudioFolder(folder_based_builder.FolderBasedBuilder):
    BASE_FEATURE = datasets.Audio()
    BASE_COLUMN_NAME = "audio"
    BUILDER_CONFIG_CLASS = AudioFolderConfig
    EXTENSIONS: List[str]  # definition at the bottom of the script
    CLASSIFICATION_TASK = AudioClassification(audio_column="audio", label_column="label")
AUDIO_EXTENSIONS = [
".aiff",
".au",
".avr",
".caf",
".flac",
".htk",
".svx",
".mat4",
".mat5",
".mpc2k",
".ogg",
".paf",
".pvf",
".raw",
".rf64",
".sd2",
".sds",
".ircam",
".voc",
".w64",
".wav",
".nist",
".wavex",
".wve",
".xi",
".mp3",
".opus",
]
AudioFolder.EXTENSIONS = AUDIO_EXTENSIONS
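# Example usage (a sketch; the directory layout is hypothetical and follows the datasets
# "audiofolder" conventions, e.g. data_dir/<label>/<file>.wav for labeled classification data):
#   from datasets import load_dataset
#   ds = load_dataset("audiofolder", data_dir="path/to/audio_folder")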
'''simple docstring'''
from collections import UserDict
from typing import List, Union
from ..utils import (
add_end_docstrings,
is_tf_available,
is_torch_available,
is_vision_available,
logging,
requires_backends,
)
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from PIL import Image
from ..image_utils import load_image
if is_torch_available():
from ..models.auto.modeling_auto import MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
if is_tf_available():
from ..models.auto.modeling_tf_auto import TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
from ..tf_utils import stable_softmax
logger = logging.get_logger(__name__)
@add_end_docstrings(PIPELINE_INIT_ARGS)
class ZeroShotImageClassificationPipeline(Pipeline):
    """Zero-shot image classification pipeline: scores an image against a set of candidate text labels."""

    def __init__(self, **kwargs):
        super().__init__(**kwargs)

        requires_backends(self, 'vision')
        self.check_model_type(
            TF_MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
            if self.framework == 'tf'
            else MODEL_FOR_ZERO_SHOT_IMAGE_CLASSIFICATION_MAPPING
        )
    def __call__(self, images: Union[str, List[str], "Image", List["Image"]], **kwargs):
        return super().__call__(images, **kwargs)

    def _sanitize_parameters(self, **kwargs):
        preprocess_params = {}
        if 'candidate_labels' in kwargs:
            preprocess_params['candidate_labels'] = kwargs['candidate_labels']
        if 'hypothesis_template' in kwargs:
            preprocess_params['hypothesis_template'] = kwargs['hypothesis_template']
        return preprocess_params, {}, {}
    def preprocess(self, image, candidate_labels=None, hypothesis_template="This is a photo of {}."):
        image = load_image(image)
        inputs = self.image_processor(images=[image], return_tensors=self.framework)
        inputs['candidate_labels'] = candidate_labels
        sequences = [hypothesis_template.format(x) for x in candidate_labels]
        text_inputs = self.tokenizer(sequences, return_tensors=self.framework, padding=True)
        inputs['text_inputs'] = [text_inputs]
        return inputs
    def _forward(self, model_inputs):
        candidate_labels = model_inputs.pop('candidate_labels')
        text_inputs = model_inputs.pop('text_inputs')
        if isinstance(text_inputs[0], UserDict):
            text_inputs = text_inputs[0]
        else:
            # Batching case.
            text_inputs = text_inputs[0][0]

        outputs = self.model(**model_inputs, **text_inputs)

        model_outputs = {
            'candidate_labels': candidate_labels,
            'logits': outputs.logits_per_image,
        }
        return model_outputs
    def postprocess(self, model_outputs):
        candidate_labels = model_outputs.pop('candidate_labels')
        logits = model_outputs['logits'][0]
        if self.framework == "pt":
            probs = logits.softmax(dim=-1).squeeze(-1)
            scores = probs.tolist()
            if not isinstance(scores, list):
                scores = [scores]
        elif self.framework == "tf":
            probs = stable_softmax(logits, axis=-1)
            scores = probs.numpy().tolist()
        else:
            raise ValueError(F'Unsupported framework: {self.framework}')

        result = [
            {'score': score, 'label': candidate_label}
            for score, candidate_label in sorted(zip(scores, candidate_labels), key=lambda x: -x[0])
        ]
        return result
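# Example usage (a sketch; the CLIP checkpoint name is illustrative, any compatible
# zero-shot image-classification model works):
#   from transformers import pipeline
#   classifier = pipeline("zero-shot-image-classification", model="openai/clip-vit-base-patch32")
#   classifier("cat.png", candidate_labels=["cat", "dog"])  # -> [{"score": ..., "label": ...}, ...]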
import argparse
import json
from typing import List
from ltp import LTP
from transformers import BertTokenizer
def _is_chinese_char(cp):
    """Checks whether ``cp`` is the codepoint of a CJK character."""
if (
(cp >= 0X4E_00 and cp <= 0X9F_FF)
or (cp >= 0X34_00 and cp <= 0X4D_BF) #
or (cp >= 0X2_00_00 and cp <= 0X2_A6_DF) #
or (cp >= 0X2_A7_00 and cp <= 0X2_B7_3F) #
or (cp >= 0X2_B7_40 and cp <= 0X2_B8_1F) #
or (cp >= 0X2_B8_20 and cp <= 0X2_CE_AF) #
or (cp >= 0XF9_00 and cp <= 0XFA_FF)
or (cp >= 0X2_F8_00 and cp <= 0X2_FA_1F) #
): #
return True
return False
def is_chinese(word: str):
    # a word counts as "Chinese" only if every character is a CJK codepoint
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    word_set = set()

    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])

    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            # try the longest possible whole-word match first
            match_len = min(end - start, max_word_len)
            for i in range(match_len, 1, -1):
                whole_word = "".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = "##" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
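# Illustration (a sketch): given BERT tokens ["中", "国", "人"] and the LTP word set {"中国"},
# add_sub_symbol returns ["中", "##国", "人"] -- the "##" marks the continuation of a whole word,
# which is exactly the information whole-word masking needs.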
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []

    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.seg(lines[i : i + 100])[0]
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)

    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["input_ids"])
    assert len(bert_res) == len(lines)

    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)

    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    with open(args.file_name, "r", encoding="utf-8") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)

    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)

    with open(args.save_path, "w", encoding="utf-8") as f:
        data = [json.dumps(ref) + "\n" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="prepare_chinese_ref")
parser.add_argument(
"--file_name",
type=str,
default="./resources/chinese-demo.txt",
help="file need process, same as training data in lm",
)
parser.add_argument(
"--ltp", type=str, default="./resources/ltp", help="resources for LTP tokenizer, usually a path"
)
parser.add_argument("--bert", type=str, default="./resources/robert", help="resources for Bert tokenizer")
parser.add_argument("--save_path", type=str, default="./resources/ref.txt", help="path to save res")
    args = parser.parse_args()
main(args)
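# Example invocation (a sketch; the script name is hypothetical, the paths are just the
# argparse defaults above):
#   python prepare_chinese_ref.py --file_name ./resources/chinese-demo.txt \
#       --ltp ./resources/ltp --bert ./resources/robert --save_path ./resources/ref.txt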
'''simple docstring'''
import json
import os
import re
import shutil
import tempfile
import unittest
from typing import Tuple
from transformers import AddedToken, BatchEncoding, ByT5Tokenizer
from transformers.utils import cached_property, is_tf_available, is_torch_available
from ...test_tokenization_common import TokenizerTesterMixin
if is_torch_available():
    FRAMEWORK = 'pt'
elif is_tf_available():
    FRAMEWORK = 'tf'
else:
    FRAMEWORK = 'jax'
class ByT5TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
    tokenizer_class = ByT5Tokenizer
    test_rust_tokenizer = False

    def setUp(self):
        super().setUp()
        tokenizer = ByT5Tokenizer()
        tokenizer.save_pretrained(self.tmpdirname)

    @cached_property
    def t5_base_tokenizer(self):
        return ByT5Tokenizer.from_pretrained('google/byt5-small')

    def get_tokenizer(self, **kwargs) -> ByT5Tokenizer:
        return self.tokenizer_class.from_pretrained(self.tmpdirname, **kwargs)
    def get_clean_sequence(self, tokenizer, with_prefix_space=False, max_length=20, min_length=5) -> Tuple[str, list]:
        # XXX The default common tokenizer tests assume that every ID is decodable on its own.
        # This assumption is invalid for ByT5 because single bytes might not be
        # valid utf-8 (byte 128 for instance).
        # Here we're overriding the smallest possible method to provide
        # a clean sequence without making the same assumption.
        toks = []
        for i in range(len(tokenizer)):
            try:
                tok = tokenizer.decode([i], clean_up_tokenization_spaces=False)
            except UnicodeDecodeError:
                continue  # skip ids that do not decode to valid utf-8 on their own
            toks.append((i, tok))

        toks = list(filter(lambda t: re.match(r'^[ a-zA-Z]+$', t[1]), toks))
        toks = list(filter(lambda t: [t[0]] == tokenizer.encode(t[1], add_special_tokens=False), toks))
        if max_length is not None and len(toks) > max_length:
            toks = toks[:max_length]
        if min_length is not None and len(toks) < min_length and len(toks) > 0:
            while len(toks) < min_length:
                toks = toks + toks
        # toks_str = [t[1] for t in toks]
        toks_ids = [t[0] for t in toks]

        # Ensure consistency
        output_txt = tokenizer.decode(toks_ids, clean_up_tokenization_spaces=False)
        if " " not in output_txt and len(toks_ids) > 1:
            output_txt = (
                tokenizer.decode([toks_ids[0]], clean_up_tokenization_spaces=False)
                + ' '
                + tokenizer.decode(toks_ids[1:], clean_up_tokenization_spaces=False)
            )
        if with_prefix_space:
            output_txt = ' ' + output_txt
        output_ids = tokenizer.encode(output_txt, add_special_tokens=False)
        return output_txt, output_ids
    def test_eos_treatment(self):
        tokenizer = self.t5_base_tokenizer
        batch_with_eos_added = tokenizer(['hi</s>', 'I went to the gym</s>', '</s>'])
        batch_without_eos_added = tokenizer(['hi', 'I went to the gym', ''])
        self.assertListEqual(batch_with_eos_added['input_ids'], batch_without_eos_added['input_ids'])
    def test_multibytes_char(self):
        tokenizer = self.t5_base_tokenizer
        src_text = 'Unicode €.'
        encoded = tokenizer(src_text)
        encoded_ids = [88, 113, 108, 102, 114, 103, 104, 35, 229, 133, 175, 49, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)

        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'Unicode €.</s>')

        encoded = tokenizer('e è é ê ë')
        encoded_ids = [104, 35, 198, 171, 35, 198, 172, 35, 198, 173, 35, 198, 174, 1]
        self.assertEqual(encoded['input_ids'], encoded_ids)
        # decoding
        decoded = tokenizer.decode(encoded_ids)
        self.assertEqual(decoded, 'e è é ê ë</s>')

        # encode/decode, but with `encode` instead of `__call__`
        self.assertEqual(tokenizer.decode(tokenizer.encode('e è é ê ë')), 'e è é ê ë</s>')
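    # A sketch of the byte arithmetic checked above: ByT5 maps each utf-8 byte b to id b + 3,
    # reserving ids 0/1/2 for pad/</s>/unk. So 'U' (byte 85) -> 88, '.' (byte 46) -> 49, and
    # '€' (bytes 0xE2 0x82 0xAC) -> 229, 133, 175; the trailing 1 is the </s> token.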
    def test_prepare_batch_integration(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 1, 0]
        # fmt: on
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        self.assertIsInstance(batch, BatchEncoding)

        if FRAMEWORK != "jax":
            result = list(batch.input_ids.numpy()[0])
        else:
            result = list(batch.input_ids.tolist()[0])

        self.assertListEqual(expected_src_tokens, result)

        self.assertEqual((2, 37), batch.input_ids.shape)
        self.assertEqual((2, 37), batch.attention_mask.shape)
    def test_empty_target_text(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization.', 'Another paragraph for summarization.']
        batch = tokenizer(src_text, padding=True, return_tensors=FRAMEWORK)
        # check if input_ids are returned and no decoder_input_ids
        self.assertIn('input_ids', batch)
        self.assertIn('attention_mask', batch)
        self.assertNotIn('decoder_input_ids', batch)
        self.assertNotIn('decoder_attention_mask', batch)
    def test_max_length_integration(self):
        tokenizer = self.t5_base_tokenizer
        tgt_text = [
            'Summary of the text.',
            'Another summary.',
        ]
        targets = tokenizer(
            text_target=tgt_text, max_length=32, padding='max_length', truncation=True, return_tensors=FRAMEWORK
        )
        self.assertEqual(32, targets['input_ids'].shape[1])
    def test_eos_in_input(self):
        tokenizer = self.t5_base_tokenizer
        src_text = ['A long paragraph for summarization. </s>']
        tgt_text = ['Summary of the text. </s>']
        # fmt: off
        expected_src_tokens = [68, 35, 111, 114, 113, 106, 35, 115, 100, 117, 100, 106, 117, 100, 115, 107, 35, 105, 114, 117, 35, 118, 120, 112, 112, 100, 117, 108, 125, 100, 119, 108, 114, 113, 49, 35, 1]
        expected_tgt_tokens = [86, 120, 112, 112, 100, 117, 124, 35, 114, 105, 35, 119, 107, 104, 35, 119, 104, 123, 119, 49, 35, 1]
        # fmt: on
        batch = tokenizer(src_text, text_target=tgt_text)
        self.assertEqual(expected_src_tokens, batch['input_ids'][0])
        self.assertEqual(expected_tgt_tokens, batch['labels'][0])
    def test_save_and_load_tokenizer(self):
        # safety check on max_len default value so we are sure the test works
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                self.assertNotEqual(tokenizer.model_max_length, 42)

        # Now let's start the test
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)

                shutil.rmtree(tmpdirname)

        tokenizers = self.get_tokenizers(model_max_length=42)
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                # Isolate this from the other tests because we save additional tokens/etc
                tmpdirname = tempfile.mkdtemp()

                sample_text = ' He is very happy, UNwant\u00E9d,running'
                tokenizer.add_tokens(['bim', 'bambam'])
                additional_special_tokens = tokenizer.additional_special_tokens
                additional_special_tokens.append('new_additional_special_token')
                tokenizer.add_special_tokens({'additional_special_tokens': additional_special_tokens})
                before_tokens = tokenizer.encode(sample_text, add_special_tokens=False)
                tokenizer.save_pretrained(tmpdirname)

                after_tokenizer = tokenizer.__class__.from_pretrained(tmpdirname)
                after_tokens = after_tokenizer.encode(sample_text, add_special_tokens=False)
                self.assertListEqual(before_tokens, after_tokens)
                self.assertIn('new_additional_special_token', after_tokenizer.additional_special_tokens)
                self.assertEqual(after_tokenizer.model_max_length, 42)

                tokenizer = tokenizer.__class__.from_pretrained(tmpdirname, model_max_length=43)
                self.assertEqual(tokenizer.model_max_length, 43)

                shutil.rmtree(tmpdirname)
    def test_special_tokens_initialization_with_non_empty_additional_special_tokens(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), encoding='utf-8') as json_file:
                    special_tokens_map = json.load(json_file)

                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), encoding='utf-8') as json_file:
                    tokenizer_config = json.load(json_file)

                added_tokens_extra_ids = [F'<extra_id_{i}>' for i in range(125)]

                special_tokens_map['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]
                tokenizer_config['additional_special_tokens'] = added_tokens_extra_ids + [
                    'an_additional_special_token'
                ]

                with open(os.path.join(tmp_dir, 'special_tokens_map.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(special_tokens_map, outfile)
                with open(os.path.join(tmp_dir, 'tokenizer_config.json'), 'w', encoding='utf-8') as outfile:
                    json.dump(tokenizer_config, outfile)

                # the following checks allow us to verify that our test works as expected, i.e. that the tokenizer takes
                # into account the new value of additional_special_tokens given in the "tokenizer_config.json" and
                # "special_tokens_map.json" files
                tokenizer_without_change_in_init = tokenizer_class.from_pretrained(tmp_dir)
                self.assertIn(
                    'an_additional_special_token', tokenizer_without_change_in_init.additional_special_tokens)
                # self.assertIn("an_additional_special_token",tokenizer_without_change_in_init.get_vocab()) # ByT5Tokenization no vocab
                self.assertEqual(
                    ['an_additional_special_token'],
                    tokenizer_without_change_in_init.convert_ids_to_tokens(
                        tokenizer_without_change_in_init.convert_tokens_to_ids(['an_additional_special_token'])),
                )

                # Now we test that we can change the value of additional_special_tokens in the from_pretrained
                new_added_tokens = added_tokens_extra_ids + [AddedToken('a_new_additional_special_token', lstrip=True)]
                tokenizer = tokenizer_class.from_pretrained(tmp_dir, additional_special_tokens=new_added_tokens)

                self.assertIn('a_new_additional_special_token', tokenizer.additional_special_tokens)
                self.assertEqual(
                    ['a_new_additional_special_token'],
                    tokenizer.convert_ids_to_tokens(
                        tokenizer.convert_tokens_to_ids(['a_new_additional_special_token'])),
                )
    def test_decode_single_bytes(self):
        tokenizer_list = []
        if self.test_slow_tokenizer:
            tokenizer_list.append((self.tokenizer_class, self.get_tokenizer()))
        if self.test_rust_tokenizer:
            tokenizer_list.append((self.rust_tokenizer_class, self.get_rust_tokenizer()))

        for tokenizer_class, tokenizer_utils in tokenizer_list:
            with tempfile.TemporaryDirectory() as tmp_dir:
                tokenizer_utils.save_pretrained(tmp_dir)
                tokenizer = tokenizer_class.from_pretrained(tmp_dir)
                self.assertTrue(tokenizer.decode([255]) == '')
    # tokenizer can be instantiated without any pretrained files, so no need for a pretrained tokenizer list
    def test_pretrained_model_lists(self):
        pass

    # tokenizer does not have a vocabulary
    def test_get_vocab(self):
        pass

    # inputs cannot be pretokenized since ids depend on the whole input string
    def test_pretokenized_inputs(self):
        pass

    # tests all ids in vocab => vocab doesn't exist so unnecessary to test
    def test_conversion_reversible(self):
        pass
    def test_convert_tokens_to_string_format(self):
        # The default common tokenizer tests uses invalid tokens for ByT5 that can only accept one-character strings
        # and special added tokens as tokens
        tokenizers = self.get_tokenizers(fast=True, do_lower_case=True)
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                tokens = ['t', 'h', 'i', 's', ' ', 'i', 's', ' ', 'a', ' ', 't', 'e', 'x', 't', '</s>']
                string = tokenizer.convert_tokens_to_string(tokens)
                self.assertIsInstance(string, str)
    def test_tokenizers_common_ids_setters(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
            with self.subTest(F'{tokenizer.__class__.__name__}'):
                attributes_list = [
                    'bos_token',
                    'eos_token',
                    'unk_token',
                    'sep_token',
                    'pad_token',
                    'cls_token',
                    'mask_token',
                ]

                token_id_to_test_setters = 0
                token_to_test_setters = tokenizer.convert_ids_to_tokens(
                    token_id_to_test_setters, skip_special_tokens=False)

                for attr in attributes_list:
                    setattr(tokenizer, attr + '_id', None)
                    self.assertEqual(getattr(tokenizer, attr), None)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), None)

                    setattr(tokenizer, attr + '_id', token_id_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr), token_to_test_setters)
                    self.assertEqual(getattr(tokenizer, attr + '_id'), token_id_to_test_setters)

                setattr(tokenizer, 'additional_special_tokens_ids', [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [])

                setattr(tokenizer, 'additional_special_tokens_ids', [token_id_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens'), [token_to_test_setters])
                self.assertListEqual(getattr(tokenizer, 'additional_special_tokens_ids'), [token_id_to_test_setters])
'''simple docstring'''
import argparse
import json
import os
import re
import shutil
import torch
from transformers import BioGptConfig, BioGptForCausalLM
from transformers.models.biogpt.tokenization_biogpt import VOCAB_FILES_NAMES
from transformers.tokenization_utils_base import TOKENIZER_CONFIG_FILE
from transformers.utils import WEIGHTS_NAME, logging
logging.set_verbosity_warning()
json_indent = 2
class Dictionary:
    """A mapping from symbols to consecutive integers (mirrors fairseq's Dictionary)."""

    def __init__(
        self,
        *,  # begin keyword-only arguments
        bos="<s>",
        pad="<pad>",
        eos="</s>",
        unk="<unk>",
        extra_special_symbols=None,
    ):
        self.bos_word, self.unk_word, self.pad_word, self.eos_word = bos, unk, pad, eos
        self.symbols = []
        self.count = []
        self.indices = {}
        self.bos_index = self.add_symbol(bos)
        self.pad_index = self.add_symbol(pad)
        self.eos_index = self.add_symbol(eos)
        self.unk_index = self.add_symbol(unk)
        if extra_special_symbols:
            for s in extra_special_symbols:
                self.add_symbol(s)
        self.nspecial = len(self.symbols)
    def __eq__(self, other):
        return self.indices == other.indices

    def __getitem__(self, idx):
        if idx < len(self.symbols):
            return self.symbols[idx]
        return self.unk_word

    def __len__(self):
        return len(self.symbols)

    def __contains__(self, sym):
        return sym in self.indices
    @classmethod
    def load(cls, f):
        """Loads the dictionary from a text file with the format ``<symbol0> <count0>`` per line."""
        d = cls()
        d.add_from_file(f)
        return d
    def add_symbol(self, word, n=1, overwrite=False):
        """Adds a word to the dictionary."""
        if word in self.indices and not overwrite:
            idx = self.indices[word]
            self.count[idx] = self.count[idx] + n
            return idx
        else:
            idx = len(self.symbols)
            self.indices[word] = idx
            self.symbols.append(word)
            self.count.append(n)
            return idx
    def _load_meta(self, lines):
        # no metadata header in plain fairseq dict files, so symbols start at line 0
        return 0
    def add_from_file(self, f):
        """Loads a pre-existing dictionary from a text file and adds its symbols to this instance."""
        if isinstance(f, str):
            try:
                with open(f, "r", encoding="utf-8") as fd:
                    self.add_from_file(fd)
            except FileNotFoundError as fnfe:
                raise fnfe
            except UnicodeError:
                raise Exception("Incorrect encoding detected in {}, please rebuild the dataset".format(f))
            return

        lines = f.readlines()
        indices_start_line = self._load_meta(lines)

        for line in lines[indices_start_line:]:
            try:
                line, field = line.rstrip().rsplit(" ", 1)
                if field == "#fairseq:overwrite":
                    overwrite = True
                    line, field = line.rsplit(" ", 1)
                else:
                    overwrite = False
                count = int(field)
                word = line
                if word in self and not overwrite:
                    raise RuntimeError(
                        "Duplicate word found when loading Dictionary: '{}'. "
                        "Duplicate words can overwrite earlier ones by adding the "
                        "#fairseq:overwrite flag at the end of the corresponding row "
                        "in the dictionary file. If using the Camembert model, please "
                        "download an updated copy of the model file.".format(word))
                self.add_symbol(word, n=count, overwrite=overwrite)
            except ValueError:
                raise ValueError("Incorrect dictionary format, expected '<token> <cnt> [flags]'")
def rewrite_dict_keys(d):
    # (1) remove the word-breaking symbol, (2) add a word-ending symbol where the word is not broken up,
    # e.g.: d = {'le@@': 5, 'tt@@': 6, 'er': 7} => {'le': 5, 'tt': 6, 'er</w>': 7}
    d2 = dict((re.sub(r"@@$", "", k), v) if k.endswith("@@") else (re.sub(r"$", "</w>", k), v) for k, v in d.items())
    keep_keys = "<s> <pad> </s> <unk>".split()
    # restore the special tokens
    for k in keep_keys:
        del d2[f"{k}</w>"]
        d2[k] = d[k]  # restore
    return d2
def convert_biogpt_checkpoint_to_pytorch(biogpt_checkpoint_path, pytorch_dump_folder_path):
    # prep
    if not os.path.exists(biogpt_checkpoint_path):
        raise ValueError(f"path {biogpt_checkpoint_path} does not exist!")
    os.makedirs(pytorch_dump_folder_path, exist_ok=True)
    print(f"Writing results to {pytorch_dump_folder_path}")

    # handle various types of models
    checkpoint_file = os.path.join(biogpt_checkpoint_path, "checkpoint.pt")
    if not os.path.isfile(checkpoint_file):
        raise ValueError(f"path to the file {checkpoint_file} does not exist!")
    chkpt = torch.load(checkpoint_file, map_location="cpu")

    args = chkpt["cfg"]["model"]

    # dicts
    dict_file = os.path.join(biogpt_checkpoint_path, "dict.txt")
    if not os.path.isfile(dict_file):
        raise ValueError(f"path to the file {dict_file} does not exist!")
    src_dict = Dictionary.load(dict_file)
    src_vocab = rewrite_dict_keys(src_dict.indices)
    src_vocab_size = len(src_vocab)
    src_vocab_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["vocab_file"])
    print(f"Generating {src_vocab_file} of {src_vocab_size} records")
    with open(src_vocab_file, "w", encoding="utf-8") as f:
        f.write(json.dumps(src_vocab, ensure_ascii=False, indent=json_indent))

    # merges_file (bpecodes)
    bpecodes_file = os.path.join(biogpt_checkpoint_path, "bpecodes")
    if not os.path.isfile(bpecodes_file):
        raise ValueError(f"path to the file {bpecodes_file} does not exist!")

    merges_file = os.path.join(pytorch_dump_folder_path, VOCAB_FILES_NAMES["merges_file"])
    shutil.copyfile(bpecodes_file, merges_file)

    # model config
    biogpt_model_config_file = os.path.join(pytorch_dump_folder_path, "config.json")
    model_conf = {
"""activation_dropout""": args["""activation_dropout"""],
"""architectures""": ["""BioGptForCausalLM"""],
"""attention_probs_dropout_prob""": args["""attention_dropout"""],
"""bos_token_id""": 0,
"""eos_token_id""": 2,
"""hidden_act""": args["""activation_fn"""],
"""hidden_dropout_prob""": args["""dropout"""],
"""hidden_size""": args["""decoder_embed_dim"""],
"""initializer_range""": 0.02,
"""intermediate_size""": args["""decoder_ffn_embed_dim"""],
"""layer_norm_eps""": 1e-12,
"""layerdrop""": args["""decoder_layerdrop"""],
"""max_position_embeddings""": args["""max_target_positions"""],
"""model_type""": """biogpt""",
"""num_attention_heads""": args["""decoder_attention_heads"""],
"""num_hidden_layers""": args["""decoder_layers"""],
"""pad_token_id""": 1,
"""scale_embedding""": not args["""no_scale_embedding"""],
"""tie_word_embeddings""": args["""share_decoder_input_output_embed"""],
"""vocab_size""": src_vocab_size,
}
# good hparam defaults to start with
print(f"""Generating {biogpt_model_config_file}""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# tokenizer config
lowerCAmelCase__ : str = os.path.join(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : Tuple = {
"""bos_token""": """<s>""",
"""eos_token""": """</s>""",
"""model_max_length""": 1024,
"""pad_token""": """<pad>""",
"""special_tokens_map_file""": None,
"""tokenizer_class""": """BioGptTokenizer""",
"""unk_token""": """<unk>""",
}
print(f"""Generating {biogpt_tokenizer_config_file}""" )
with open(UpperCamelCase , """w""" , encoding="""utf-8""" ) as f:
f.write(json.dumps(UpperCamelCase , ensure_ascii=UpperCamelCase , indent=UpperCamelCase ) )
# model
lowerCAmelCase__ : Union[str, Any] = chkpt["""model"""]
# remove unneeded keys
lowerCAmelCase__ : Optional[Any] = [
"""decoder.version""",
]
for k in ignore_keys:
model_state_dict.pop(UpperCamelCase , UpperCamelCase )
lowerCAmelCase__ : List[str] = list(model_state_dict.keys() )
for layer_name in layer_names:
if layer_name.endswith("""output_projection.weight""" ):
lowerCAmelCase__ : Any = model_state_dict.pop(UpperCamelCase )
else:
lowerCAmelCase__ : Any = model_state_dict.pop(UpperCamelCase )
lowerCAmelCase__ : Union[str, Any] = BioGptConfig.from_pretrained(UpperCamelCase )
lowerCAmelCase__ : Dict = BioGptForCausalLM(UpperCamelCase )
# check that it loads ok
model_new.load_state_dict(UpperCamelCase )
# save
lowerCAmelCase__ : Any = os.path.join(UpperCamelCase , UpperCamelCase )
print(f"""Generating {pytorch_weights_dump_path}""" )
torch.save(UpperCamelCase , UpperCamelCase )
print("""Conversion is done!""" )
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'''--biogpt_checkpoint_path''',
default=None,
type=str,
required=True,
help=(
'''Path to the official PyTorch checkpoint file which is expected to reside in the dump dir with dicts,'''
''' bpecodes, etc.'''
),
)
parser.add_argument(
'''--pytorch_dump_folder_path''', default=None, type=str, required=True, help='''Path to the output PyTorch model.'''
)
    args = parser.parse_args()
convert_biogpt_checkpoint_to_pytorch(args.biogpt_checkpoint_path, args.pytorch_dump_folder_path)
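# Example invocation (a sketch; the checkpoint directory is hypothetical and must contain the
# checkpoint.pt, dict.txt and bpecodes files checked for above):
#   python convert_biogpt_original_pytorch_checkpoint_to_pytorch.py \
#       --biogpt_checkpoint_path ./biogpt-fairseq --pytorch_dump_folder_path ./biogpt-hf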
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch
if is_torch_available():
import torch
from transformers.generation import DisjunctiveConstraint
@require_torch
class _lowerCamelCase ( unittest.TestCase ):
'''simple docstring'''
    def test_input_types(self):
        # For consistency across different places the DisjunctiveConstraint is called,
        # dc.token_ids is a list of integers. It is also initialized only by integers.
        cset = [[1, 2, 4], [1, 2, 3, 4]]
        dc = DisjunctiveConstraint(cset)
        self.assertTrue(isinstance(dc.token_ids, list))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(torch.LongTensor([[1, 2, 4], [1, 2, 3]]))

        with self.assertRaises(ValueError):
            DisjunctiveConstraint([torch.LongTensor([1, 2, 4]), torch.LongTensor([1, 2, 3, 4, 5])])
    def test_check_illegal_input(self):
        # We can't have constraints that are complete subsets of another. This leads to a perverse
        # interpretation of "constraint fulfillment": does generating [1,2,3] fulfill the constraint?
        # It would mean that it generated [1,2] which fulfills it, but it's in the middle of potentially
        # fulfilling [1,2,3,4]. If we believe that [1,2,3] does fulfill the constraint, then the algorithm
        # will necessarily never reach [1,2,3,4], giving users a false sense of control (better to just not allow it).
        cset = [[1, 2], [1, 2, 3, 4]]

        with self.assertRaises(ValueError):
            DisjunctiveConstraint(cset)  # fails here
    def test_example_progression(self):
        cset = [[1, 2, 3], [1, 2, 4]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        desired = stepped is True and completed is False and reset is False
        self.assertTrue(desired)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(3)
        desired = stepped is True and completed is True and reset is False
        self.assertTrue(desired)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 3])
    def test_example_progression_unequal_three_mid_and_reset(self):
        cset = [[1, 2, 3], [1, 2, 4, 5], [1, 2, 5]]
        dc = DisjunctiveConstraint(cset)

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(4)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.current_seq == [1, 2, 4])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.current_seq == [1, 2, 4, 5])

        dc.reset()

        stepped, completed, reset = dc.update(1)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 3)
        self.assertTrue(dc.current_seq == [1])

        stepped, completed, reset = dc.update(2)
        self.assertTrue(not dc.completed)
        self.assertTrue(dc.remaining() == 2)
        self.assertTrue(dc.current_seq == [1, 2])

        stepped, completed, reset = dc.update(5)
        self.assertTrue(dc.completed)  # Completed!
        self.assertTrue(dc.remaining() == 0)
        self.assertTrue(dc.current_seq == [1, 2, 5])
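# Usage sketch (illustrative, not part of the tests above): a DisjunctiveConstraint built from
# several tokenizations of a word can be passed to generate() via its `constraints` argument:
#   word_ids = tokenizer(["rains", "raining"], add_special_tokens=False).input_ids
#   model.generate(input_ids, constraints=[DisjunctiveConstraint(word_ids)], num_beams=4)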
from __future__ import annotations
Matrix = list[list[int]]
# assigning initial values to the grid
initial_grid: Matrix = [
[3, 0, 6, 5, 0, 8, 4, 0, 0],
[5, 2, 0, 0, 0, 0, 0, 0, 0],
[0, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
# a grid with no solution
no_solution: Matrix = [
[5, 0, 6, 5, 0, 8, 4, 0, 3],
[5, 2, 0, 0, 0, 0, 0, 0, 2],
[1, 8, 7, 0, 0, 0, 0, 3, 1],
[0, 0, 3, 0, 1, 0, 0, 8, 0],
[9, 0, 0, 8, 6, 3, 0, 0, 5],
[0, 5, 0, 0, 9, 0, 6, 0, 0],
[1, 3, 0, 0, 0, 0, 2, 5, 0],
[0, 0, 0, 0, 0, 0, 0, 7, 4],
[0, 0, 5, 2, 0, 6, 3, 0, 0],
]
def is_safe(grid: Matrix, row: int, column: int, n: int) -> bool:
    """Return True if digit ``n`` can be placed at ``grid[row][column]`` without a conflict."""
for i in range(9 ):
if grid[row][i] == n or grid[i][column] == n:
return False
for i in range(3 ):
for j in range(3 ):
if grid[(row - row % 3) + i][(column - column % 3) + j] == n:
return False
return True
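# Quick check (a sketch, using the initial_grid above): is_safe(initial_grid, 0, 1, 1) is True
# (row 0, column 1 and the top-left 3x3 box contain no 1), while is_safe(initial_grid, 0, 1, 5)
# is False because row 0 already contains a 5.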
def find_empty_location(grid: Matrix) -> tuple[int, int] | None:
    """Return the coordinates of the first empty cell (value 0), or None if the grid is full."""
for i in range(9 ):
for j in range(9 ):
if grid[i][j] == 0:
return i, j
return None
def sudoku(grid: Matrix) -> Matrix | None:
    """Recursive backtracking: try digits 1-9 in the first empty cell and undo on failure."""
    if location := find_empty_location(grid):
        row, column = location
    else:
        # If the location is ``None``, then the grid is solved.
        return grid

    for digit in range(1, 10):
        if is_safe(grid, row, column, digit):
            grid[row][column] = digit
            if sudoku(grid) is not None:
                return grid
            grid[row][column] = 0

    return None
def print_solution(grid: Matrix) -> None:
    """Pretty-print the grid, one row per line."""
    for row in grid:
        for cell in row:
            print(cell, end=" ")
print()
if __name__ == "__main__":
# make a copy of grid so that you can compare with the unmodified grid
for example_grid in (initial_grid, no_solution):
print('''\nExample grid:\n''' + '''=''' * 20)
print_solution(example_grid)
print('''\nExample grid solution:''')
        solution = sudoku(example_grid)
if solution is not None:
print_solution(solution)
else:
print('''Cannot find a solution.''')
'''simple docstring'''
import re
import tempfile
from pathlib import Path
import pytest
import yaml
from datasets.utils.readme import ReadMe
# @pytest.fixture
# def example_yaml_structure():
example_yaml_structure = yaml.safe_load(
'''\
name: ""
allow_empty: false
allow_empty_text: true
subsections:
- name: "Dataset Card for X" # First-level markdown heading
allow_empty: false
allow_empty_text: true
subsections:
- name: "Table of Contents"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Dataset Description"
allow_empty: false
allow_empty_text: false
subsections:
- name: "Dataset Summary"
allow_empty: false
allow_empty_text: false
subsections: null
- name: "Supported Tasks and Leaderboards"
allow_empty: true
allow_empty_text: true
subsections: null
- name: Languages
allow_empty: false
allow_empty_text: true
subsections: null
'''
)
CORRECT_DICT = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_CORRECT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
README_CORRECT_FOUR_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
#### Extra Ignored Subsection
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
CORRECT_DICT_FOUR_LEVEL = {
'''name''': '''root''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{
'''name''': '''Dataset Card for My Dataset''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [
{'''name''': '''Table of Contents''', '''text''': '''Some text here.''', '''is_empty_text''': False, '''subsections''': []},
{
'''name''': '''Dataset Description''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Dataset Summary''',
'''text''': '''Some text here.''',
'''is_empty_text''': False,
'''subsections''': [
{
'''name''': '''Extra Ignored Subsection''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
}
],
},
{
'''name''': '''Supported Tasks and Leaderboards''',
'''text''': '''''',
'''is_empty_text''': True,
'''subsections''': [],
},
{'''name''': '''Languages''', '''text''': '''Language Text''', '''is_empty_text''': False, '''subsections''': []},
],
},
],
}
],
}
README_EMPTY_YAML = '''\
---
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_EMPTY_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tEmpty YAML markers are present in the README.'''
)
README_NO_YAML = '''\
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_NO_YAML = (
'''The following issues were found for the README at `{path}`:\n-\tNo YAML markers are present in the README.'''
)
README_INCORRECT_YAML = '''\
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_INCORRECT_YAML = '''The following issues were found for the README at `{path}`:\n-\tOnly the start of YAML tags present in the README.'''
README_MISSING_TEXT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_TEXT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Summary` but it is empty.\n-\tExpected some text in section `Dataset Summary` but it is empty (text in subsections are ignored).'''
README_NONE_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
'''
EXPECTED_ERROR_README_NONE_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Dataset Card for My Dataset` but it is empty.\n-\tSection `Dataset Card for My Dataset` expected the following subsections: `Table of Contents`, `Dataset Description`. Found \'None\'.'''
README_MISSING_SUBSECTION = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_SUBSECTION = '''The following issues were found for the README at `{path}`:\n-\tSection `Dataset Description` is missing subsection: `Supported Tasks and Leaderboards`.'''
README_MISSING_CONTENT = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
'''
EXPECTED_ERROR_README_MISSING_CONTENT = '''The following issues were found for the README at `{path}`:\n-\tExpected some content in section `Languages` but it is empty.'''
README_MISSING_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MISSING_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.'''
README_MULTIPLE_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
# Dataset Card My Dataset
'''
EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tThe README has several first-level headings: `Dataset Card for My Dataset`, `Dataset Card My Dataset`. Only one heading is expected. Skipping further validation for this README.'''
README_WRONG_FIRST_LEVEL = '''\
---
language:
- zh
- en
---
# Dataset Card My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_WRONG_FIRST_LEVEL = '''The following issues were found for the README at `{path}`:\n-\tNo first-level heading starting with `Dataset Card for` found in README. Skipping further validation for this README.'''
README_EMPTY = ''''''
EXPECTED_ERROR_README_EMPTY = '''The following issues were found for the README at `{path}`:\n-\tThe README has no first-level headings. One heading is expected. Skipping further validation for this README.\n-\tNo YAML markers are present in the README.'''
README_MULTIPLE_SAME_HEADING_1 = '''\
---
language:
- zh
- en
---
# Dataset Card for My Dataset
# Dataset Card for My Dataset
## Table of Contents
Some text here.
## Dataset Description
Some text here.
### Dataset Summary
Some text here.
### Supported Tasks and Leaderboards
### Languages
Language Text
'''
EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1 = '''The following issues were found while parsing the README at `{path}`:\n-\tMultiple sections with the same heading `Dataset Card for My Dataset` have been found. Please keep only one of these sections.'''
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_string_correct(readme_md, expected_dict):
    assert ReadMe.from_string(readme_md, example_yaml_structure).to_dict() == expected_dict
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_string_validation_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        readme = ReadMe.from_string(readme_md, example_yaml_structure)
        readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_parsing_errors(readme_md, expected_error):
    with pytest.raises(ValueError, match=re.escape(expected_error.format(path='root'))):
        ReadMe.from_string(readme_md, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_string_suppress_parsing_errors(readme_md):
    ReadMe.from_string(readme_md, example_yaml_structure, suppress_parsing_errors=True)
@pytest.mark.parametrize(
'readme_md, expected_dict' , [
(README_CORRECT, CORRECT_DICT),
(README_CORRECT_FOUR_LEVEL, CORRECT_DICT_FOUR_LEVEL),
] , )
def test_readme_from_readme_correct(readme_md, expected_dict):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        out = ReadMe.from_readme(path, example_yaml_structure).to_dict()
        assert out["name"] == path
        assert out["text"] == ""
        assert out["is_empty_text"]
        assert out["subsections"] == expected_dict["subsections"]
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_NO_YAML, EXPECTED_ERROR_README_NO_YAML),
(README_EMPTY_YAML, EXPECTED_ERROR_README_EMPTY_YAML),
(README_INCORRECT_YAML, EXPECTED_ERROR_README_INCORRECT_YAML),
(README_EMPTY, EXPECTED_ERROR_README_EMPTY),
(README_NONE_SUBSECTION, EXPECTED_ERROR_README_NONE_SUBSECTION),
(README_MISSING_FIRST_LEVEL, EXPECTED_ERROR_README_MISSING_FIRST_LEVEL),
(README_MISSING_SUBSECTION, EXPECTED_ERROR_README_MISSING_SUBSECTION),
(README_MISSING_TEXT, EXPECTED_ERROR_README_MISSING_TEXT),
(README_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_WRONG_FIRST_LEVEL),
(README_MULTIPLE_WRONG_FIRST_LEVEL, EXPECTED_ERROR_README_MULTIPLE_WRONG_FIRST_LEVEL),
(README_MISSING_CONTENT, EXPECTED_ERROR_README_MISSING_CONTENT),
] , )
def test_readme_from_readme_error(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            readme = ReadMe.from_readme(path, example_yaml_structure)
            readme.validate()
@pytest.mark.parametrize(
'readme_md, expected_error' , [
(README_MULTIPLE_SAME_HEADING_1, EXPECTED_ERROR_README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_parsing_errors(readme_md, expected_error):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        expected_error = expected_error.format(path=path)
        with pytest.raises(ValueError, match=re.escape(expected_error)):
            ReadMe.from_readme(path, example_yaml_structure)
@pytest.mark.parametrize(
'readme_md,' , [
(README_MULTIPLE_SAME_HEADING_1),
] , )
def test_readme_from_readme_suppress_parsing_errors(readme_md):
    with tempfile.TemporaryDirectory() as tmp_dir:
        path = Path(tmp_dir) / 'README.md'
        with open(path, 'w+') as readme_file:
            readme_file.write(readme_md)
        ReadMe.from_readme(path, example_yaml_structure, suppress_parsing_errors=True)
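# Run these checks with pytest (a sketch; the module path depends on where this file lives):
#   pytest -q tests/test_readme_util.py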
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {"configuration_mbart": ["MBART_PRETRAINED_CONFIG_ARCHIVE_MAP", "MBartConfig", "MBartOnnxConfig"]}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart"] = ["MBartTokenizer"]
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["tokenization_mbart_fast"] = ["MBartTokenizerFast"]
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_mbart"] = [
        "MBART_PRETRAINED_MODEL_ARCHIVE_LIST",
        "MBartForCausalLM",
        "MBartForConditionalGeneration",
        "MBartForQuestionAnswering",
        "MBartForSequenceClassification",
        "MBartModel",
        "MBartPreTrainedModel",
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_tf_mbart"] = [
        "TFMBartForConditionalGeneration",
        "TFMBartModel",
        "TFMBartPreTrainedModel",
    ]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure["modeling_flax_mbart"] = [
        "FlaxMBartForConditionalGeneration",
        "FlaxMBartForQuestionAnswering",
        "FlaxMBartForSequenceClassification",
        "FlaxMBartModel",
        "FlaxMBartPreTrainedModel",
    ]
if TYPE_CHECKING:
from .configuration_mbart import MBART_PRETRAINED_CONFIG_ARCHIVE_MAP, MBartConfig, MBartOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart import MBartTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_mbart_fast import MBartTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mbart import (
MBART_PRETRAINED_MODEL_ARCHIVE_LIST,
MBartForCausalLM,
MBartForConditionalGeneration,
MBartForQuestionAnswering,
MBartForSequenceClassification,
MBartModel,
MBartPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mbart import TFMBartForConditionalGeneration, TFMBartModel, TFMBartPreTrainedModel
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_mbart import (
FlaxMBartForConditionalGeneration,
FlaxMBartForQuestionAnswering,
FlaxMBartForSequenceClassification,
FlaxMBartModel,
FlaxMBartPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure, module_spec=__spec__)
| 39 |
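The mbart `__init__` above defers the heavy backend imports (sentencepiece, tokenizers, torch, TF, flax) until an attribute is first accessed, via `_LazyModule`. A minimal sketch of the same idea using PEP 562 module-level `__getattr__`; the attribute map below uses stdlib modules as stand-ins for the real backends:

import importlib

# Maps exported attribute -> module that provides it. Importing this demo module
# is cheap; `import math` only happens on first attribute access.
_lazy_attrs = {"sqrt": "math", "dumps": "json"}

def __getattr__(name):  # PEP 562: called for attributes not found normally
    if name in _lazy_attrs:
        return getattr(importlib.import_module(_lazy_attrs[name]), name)
    raise AttributeError(f"module {__name__!r} has no attribute {name!r}")

# Usage (from another module): `import lazy_demo; lazy_demo.sqrt(9.0)` -> 3.0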
"""Tests for the TensorFlow ConvBERT model."""
from __future__ import annotations
import os
import tempfile
import unittest
from transformers import ConvBertConfig, is_tf_available
from transformers.testing_utils import require_tf, slow
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFConvBertForMaskedLM,
TFConvBertForMultipleChoice,
TFConvBertForQuestionAnswering,
TFConvBertForSequenceClassification,
TFConvBertForTokenClassification,
TFConvBertModel,
)
class TFConvBertModelTester:
    """Builds a small ConvBERT config plus dummy inputs for the checks below."""

    def __init__(self, parent):
        self.parent = parent
        # Hard-coded dimensions keep the test model small and fast.
        self.batch_size = 13
        self.seq_length = 7
        self.is_training = True
        self.use_input_mask = True
        self.use_token_type_ids = True
        self.use_labels = True
        self.vocab_size = 99
        self.hidden_size = 384
        self.num_hidden_layers = 2
        self.num_attention_heads = 4
        self.intermediate_size = 37
        self.hidden_act = "gelu"
        self.hidden_dropout_prob = 0.1
        self.attention_probs_dropout_prob = 0.1
        self.max_position_embeddings = 512
        self.type_vocab_size = 16
        self.type_sequence_label_size = 2
        self.initializer_range = 0.02
        self.num_labels = 3
        self.num_choices = 4
        self.embedding_size = 128
        self.head_ratio = 2
        self.conv_kernel_size = 9
        self.num_groups = 1
        self.scope = None
    def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = random_attention_mask([self.batch_size, self.seq_length])

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = ConvBertConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
            return_dict=True,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    def create_and_check_model(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertModel(config=config)
        # Exercise both the dict and the list calling conventions.
        inputs = {"input_ids": input_ids, "attention_mask": input_mask, "token_type_ids": token_type_ids}
        result = model(inputs)

        inputs = [input_ids, input_mask]
        result = model(inputs)

        self.parent.assertEqual(result.last_hidden_state.shape, (self.batch_size, self.seq_length, self.hidden_size))

    def create_and_check_for_masked_lm(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForMaskedLM(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.vocab_size))

    def create_and_check_for_sequence_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForSequenceClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_labels))

    def create_and_check_for_multiple_choice(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_choices = self.num_choices
        model = TFConvBertForMultipleChoice(config=config)
        multiple_choice_inputs_ids = tf.tile(tf.expand_dims(input_ids, 1), (1, self.num_choices, 1))
        multiple_choice_input_mask = tf.tile(tf.expand_dims(input_mask, 1), (1, self.num_choices, 1))
        multiple_choice_token_type_ids = tf.tile(tf.expand_dims(token_type_ids, 1), (1, self.num_choices, 1))
        inputs = {
            "input_ids": multiple_choice_inputs_ids,
            "attention_mask": multiple_choice_input_mask,
            "token_type_ids": multiple_choice_token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.num_choices))

    def create_and_check_for_token_classification(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        config.num_labels = self.num_labels
        model = TFConvBertForTokenClassification(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.logits.shape, (self.batch_size, self.seq_length, self.num_labels))

    def create_and_check_for_question_answering(
        self, config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
    ):
        model = TFConvBertForQuestionAnswering(config=config)
        inputs = {
            "input_ids": input_ids,
            "attention_mask": input_mask,
            "token_type_ids": token_type_ids,
        }
        result = model(inputs)
        self.parent.assertEqual(result.start_logits.shape, (self.batch_size, self.seq_length))
        self.parent.assertEqual(result.end_logits.shape, (self.batch_size, self.seq_length))
    def prepare_config_and_inputs_for_common(self):
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"input_ids": input_ids, "token_type_ids": token_type_ids, "attention_mask": input_mask}
        return config, inputs_dict
@require_tf
class TFConvBertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    all_model_classes = (
        (
            TFConvBertModel,
            TFConvBertForMaskedLM,
            TFConvBertForQuestionAnswering,
            TFConvBertForSequenceClassification,
            TFConvBertForTokenClassification,
            TFConvBertForMultipleChoice,
        )
        if is_tf_available()
        else ()
    )
    pipeline_model_mapping = (
        {
            "feature-extraction": TFConvBertModel,
            "fill-mask": TFConvBertForMaskedLM,
            "question-answering": TFConvBertForQuestionAnswering,
            "text-classification": TFConvBertForSequenceClassification,
            "token-classification": TFConvBertForTokenClassification,
            "zero-shot": TFConvBertForSequenceClassification,
        }
        if is_tf_available()
        else {}
    )
    test_pruning = False
    test_head_masking = False
    test_onnx = False

    def setUp(self):
        self.model_tester = TFConvBertModelTester(self)
        self.config_tester = ConfigTester(self, config_class=ConvBertConfig, hidden_size=37)
    def test_config(self):
        self.config_tester.run_common_tests()

    def test_model(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_model(*config_and_inputs)

    def test_for_masked_lm(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_masked_lm(*config_and_inputs)

    def test_for_multiple_choice(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_multiple_choice(*config_and_inputs)

    def test_for_question_answering(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_question_answering(*config_and_inputs)

    def test_for_sequence_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

    def test_for_token_classification(self):
        config_and_inputs = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_for_token_classification(*config_and_inputs)
    @slow
    def test_saved_model_creation_extended(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.output_hidden_states = True
        config.output_attentions = True

        if hasattr(config, "use_cache"):
            config.use_cache = True

        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        for model_class in self.all_model_classes:
            class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            model = model_class(config)
            num_out = len(model(class_inputs_dict))

            with tempfile.TemporaryDirectory() as tmpdirname:
                model.save_pretrained(tmpdirname, saved_model=True)
                saved_model_dir = os.path.join(tmpdirname, "saved_model", "1")
                model = tf.keras.models.load_model(saved_model_dir)
                outputs = model(class_inputs_dict)

                if self.is_encoder_decoder:
                    output_hidden_states = outputs["encoder_hidden_states"]
                    output_attentions = outputs["encoder_attentions"]
                else:
                    output_hidden_states = outputs["hidden_states"]
                    output_attentions = outputs["attentions"]

                self.assertEqual(len(outputs), num_out)

                expected_num_layers = getattr(
                    self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
                )

                self.assertEqual(len(output_hidden_states), expected_num_layers)
                self.assertListEqual(
                    list(output_hidden_states[0].shape[-2:]),
                    [self.model_tester.seq_length, self.model_tester.hidden_size],
                )

                self.assertEqual(len(output_attentions), self.model_tester.num_hidden_layers)
                self.assertListEqual(
                    list(output_attentions[0].shape[-3:]),
                    [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
                )
    @slow
    def test_model_from_pretrained(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        self.assertIsNotNone(model)
    def test_attention_outputs(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
        config.return_dict = True
        decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
        encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
        decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
        encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)

        def check_decoder_attentions_output(outputs):
            out_len = len(outputs)
            self.assertEqual(out_len % 2, 0)
            decoder_attentions = outputs.decoder_attentions
            self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(decoder_attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, decoder_seq_length, decoder_key_length],
            )

        def check_encoder_attentions_output(outputs):
            attentions = [
                t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
            ]
            self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
            self.assertListEqual(
                list(attentions[0].shape[-3:]),
                [self.model_tester.num_attention_heads / 2, encoder_seq_length, encoder_key_length],
            )

        for model_class in self.all_model_classes:
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = False
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            out_len = len(outputs)
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            if self.is_encoder_decoder:
                model = model_class(config)
                outputs = model(self._prepare_for_class(inputs_dict, model_class))
                self.assertEqual(config.output_hidden_states, False)
                check_decoder_attentions_output(outputs)

            # Check that output attentions can also be changed via the config
            del inputs_dict["output_attentions"]
            config.output_attentions = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(config.output_hidden_states, False)
            check_encoder_attentions_output(outputs)

            # Check attention is always last and order is fine
            inputs_dict["output_attentions"] = True
            config.output_hidden_states = True
            model = model_class(config)
            outputs = model(self._prepare_for_class(inputs_dict, model_class))
            self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
            self.assertEqual(model.config.output_hidden_states, True)
            check_encoder_attentions_output(outputs)
@require_tf
class TFConvBertModelIntegrationTest(unittest.TestCase):
    @slow
    def test_inference_masked_lm(self):
        model = TFConvBertModel.from_pretrained("YituTech/conv-bert-base")
        input_ids = tf.constant([[0, 1, 2, 3, 4, 5]])
        output = model(input_ids)[0]

        expected_shape = [1, 6, 768]
        self.assertEqual(output.shape, expected_shape)

        expected_slice = tf.constant(
            [
                [
                    [-0.03475493, -0.4686034, -0.30638832],
                    [0.22637248, -0.26988646, -0.7423424],
                    [0.10324868, -0.45013508, -0.58280784],
                ]
            ]
        )
        tf.debugging.assert_near(output[:, :3, :3], expected_slice, atol=1e-4) | 331 | 0 |
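One ConvBERT-specific detail in the assertions above: attention shapes are checked against `num_attention_heads / 2`, not `num_attention_heads`. ConvBERT swaps a fraction of the self-attention heads for span-based dynamic convolution, controlled by `head_ratio` (2 in the tester), so only half the heads show up in the returned attention maps. A small sketch of the shape arithmetic, reusing the tester's hard-coded values:

# Values hard-coded in TFConvBertModelTester above.
num_attention_heads = 4
head_ratio = 2
batch_size, seq_length = 13, 7

heads_in_attention_output = num_attention_heads // head_ratio  # 2
expected_shape = (batch_size, heads_in_attention_output, seq_length, seq_length)
print(expected_shape)  # (13, 2, 7, 7)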
"""simple docstring"""
from collections import OrderedDict
from typing import Any, Mapping, Optional
from ... import PreTrainedTokenizer, TensorType, is_torch_available
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfigWithPast
from ...utils import logging
logger = logging.get_logger(__name__)

GPT_NEO_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "EleutherAI/gpt-neo-1.3B": "https://huggingface.co/EleutherAI/gpt-neo-1.3B/resolve/main/config.json",
    # See all GPTNeo models at https://huggingface.co/models?filter=gpt_neo
}
class GPTNeoConfig(PretrainedConfig):
    model_type = "gpt_neo"
    keys_to_ignore_at_inference = ["past_key_values"]
    attribute_map = {"num_attention_heads": "num_heads", "num_hidden_layers": "num_layers"}

    def __init__(
        self,
        vocab_size=50257,
        max_position_embeddings=2048,
        hidden_size=2048,
        num_layers=24,
        attention_types=[[["global", "local"], 12]],
        num_heads=16,
        intermediate_size=None,
        window_size=256,
        activation_function="gelu_new",
        resid_dropout=0.0,
        embed_dropout=0.0,
        attention_dropout=0.0,
        classifier_dropout=0.1,
        layer_norm_epsilon=1e-5,
        initializer_range=0.02,
        use_cache=True,
        bos_token_id=50256,
        eos_token_id=50256,
        **kwargs,
    ):
        self.vocab_size = vocab_size
        self.max_position_embeddings = max_position_embeddings
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.num_heads = num_heads
        self.intermediate_size = intermediate_size
        self.window_size = window_size
        self.activation_function = activation_function
        self.resid_dropout = resid_dropout
        self.embed_dropout = embed_dropout
        self.attention_dropout = attention_dropout
        self.classifier_dropout = classifier_dropout
        self.layer_norm_epsilon = layer_norm_epsilon
        self.initializer_range = initializer_range
        self.use_cache = use_cache
        self.bos_token_id = bos_token_id
        self.eos_token_id = eos_token_id

        self.attention_types = attention_types
        self.attention_layers = self.expand_attention_types_params(attention_types)

        if len(self.attention_layers) != self.num_layers:
            raise ValueError(
                "Configuration for convolutional module is incorrect. "
                "It is required that `len(config.attention_layers)` == `config.num_layers` "
                f"but is `len(config.attention_layers) = {len(self.attention_layers)}`, "
                f"`config.num_layers = {self.num_layers}`. "
                "`config.attention_layers` is prepared using `config.attention_types`. "
                "Please verify the value of `config.attention_types` argument."
            )

        super().__init__(bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)

    @staticmethod
    def expand_attention_types_params(attention_types):
        attentions = []
        for item in attention_types:
            for _ in range(item[1]):
                attentions.extend(item[0])
        return attentions
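    # Worked example: the default attention_types=[[["global", "local"], 12]]
    # expands to ["global", "local"] repeated 12 times, i.e. 24 entries, one per
    # layer, alternating global and local attention:
    #
    #     GPTNeoConfig.expand_attention_types_params([[["global", "local"], 12]])
    #     # -> ["global", "local", "global", "local", ...]  (24 items)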
def custom_unfold(input, dimension, size, step):
    """Custom torch.Tensor.unfold implementation to enable the export to ONNX."""
    import torch

    shape = input.size()
    rank = len(shape)
    sizedim = shape[dimension]

    low_indices = torch.arange(0, sizedim, step)
    min_length = torch.div(sizedim - size, step, rounding_mode="floor") + 1
    indices = torch.arange(size) + low_indices[:min_length][:, None]

    s = [slice(None)] * rank
    s[dimension] = indices
    sliced = input[s]

    perm = list(range(0, rank + 1))
    perm.append(perm.pop(dimension + 1))

    return sliced.permute(perm)


def custom_get_block_length_and_num_blocks(seq_length, window_size):
    """Custom implementation of _get_block_length_and_num_blocks to enable the export to ONNX."""
    import torch

    candidates = torch.arange(1, window_size)
    remainders = torch.remainder(seq_length, candidates)
    divisor_indices = remainders == 0
    divisors = candidates[divisor_indices]
    largest_divisor = torch.max(divisors)
    return largest_divisor, torch.div(seq_length, largest_divisor, rounding_mode="floor")
class GPTNeoOnnxConfig(OnnxConfigWithPast):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        common_inputs = OrderedDict({"input_ids": {0: "batch", 1: "sequence"}})
        if self.use_past:
            self.fill_with_past_key_values_(common_inputs, direction="inputs")
            common_inputs["attention_mask"] = {0: "batch", 1: "past_sequence + sequence"}
        else:
            common_inputs["attention_mask"] = {0: "batch", 1: "sequence"}

        return common_inputs

    @property
    def num_attention_heads(self) -> int:
        return self._config.num_heads

    def generate_dummy_inputs(
        self,
        tokenizer: PreTrainedTokenizer,
        batch_size: int = -1,
        seq_length: int = -1,
        is_pair: bool = False,
        framework: Optional[TensorType] = None,
    ) -> Mapping[str, Any]:
        common_inputs = super(OnnxConfigWithPast, self).generate_dummy_inputs(
            tokenizer, batch_size=batch_size, seq_length=seq_length, is_pair=is_pair, framework=framework
        )

        # We need to order the input in the way they appears in the forward()
        ordered_inputs = OrderedDict({"input_ids": common_inputs["input_ids"]})

        # Need to add the past_keys
        if self.use_past:
            if not is_torch_available():
                raise ValueError("Cannot generate dummy past_keys inputs without PyTorch installed.")
            else:
                import torch

                batch, seqlen = common_inputs["input_ids"].shape
                # Not using the same length for past_key_values
                past_key_values_length = seqlen + 2
                past_shape = (
                    batch,
                    self.num_attention_heads,
                    past_key_values_length,
                    self._config.hidden_size // self.num_attention_heads,
                )
                ordered_inputs["past_key_values"] = [
                    (torch.zeros(past_shape), torch.zeros(past_shape)) for _ in range(self.num_layers)
                ]

        ordered_inputs["attention_mask"] = common_inputs["attention_mask"]
        if self.use_past:
            mask_dtype = ordered_inputs["attention_mask"].dtype
            ordered_inputs["attention_mask"] = torch.cat(
                [ordered_inputs["attention_mask"], torch.ones(batch, past_key_values_length, dtype=mask_dtype)], dim=1
            )

        return ordered_inputs

    @property
    def default_onnx_opset(self) -> int:
        return 13
| 40 |
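A usage sketch for `custom_unfold` above: it mirrors `torch.Tensor.unfold`, slicing a window of `size` elements every `step` positions along `dimension`, which is what makes GPT-Neo's local-attention blocking exportable to ONNX. Assumes PyTorch is installed and the module above is importable at its usual transformers path:

import torch
from transformers.models.gpt_neo.configuration_gpt_neo import custom_unfold

x = torch.arange(8).unsqueeze(0)                 # shape (1, 8)
windows = custom_unfold(x, dimension=1, size=4, step=2)
print(windows.shape)                             # torch.Size([1, 3, 4])
print(torch.equal(windows, x.unfold(1, 4, 2)))   # True: matches torch's built-in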
"""Tests for the FAISS and Elasticsearch indexes in `datasets.search`."""
import os
import tempfile
from functools import partial
from unittest import TestCase
from unittest.mock import patch
import numpy as np
import pytest
from datasets.arrow_dataset import Dataset
from datasets.search import ElasticSearchIndex, FaissIndex, MissingIndex
from .utils import require_elasticsearch, require_faiss
pytestmark = pytest.mark.integration
@require_faiss
class IndexableDatasetTest(TestCase):
    def _create_dummy_dataset(self) -> Dataset:
        dset = Dataset.from_dict({"filename": ["my_name-train" + "_" + str(x) for x in np.arange(30).tolist()]})
        return dset

    def test_add_faiss_index(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset = dset.map(
            lambda ex, i: {"vecs": i * np.ones(5, dtype=np.float32)}, with_indices=True, keep_in_memory=True
        )
        dset = dset.add_faiss_index("vecs", batch_size=100, metric_type=faiss.METRIC_INNER_PRODUCT)
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")
        dset.drop_index("vecs")

    def test_add_faiss_index_from_external_arrays(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            batch_size=100,
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )
        scores, examples = dset.get_nearest_examples("vecs", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_serialization(self):
        import faiss

        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1),
            index_name="vecs",
            metric_type=faiss.METRIC_INNER_PRODUCT,
        )

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            dset.save_faiss_index("vecs", tmp_file.name)
            dset.load_faiss_index("vecs2", tmp_file.name)
        os.unlink(tmp_file.name)

        scores, examples = dset.get_nearest_examples("vecs2", np.ones(5, dtype=np.float32))
        self.assertEqual(examples["filename"][0], "my_name-train_29")

    def test_drop_index(self):
        dset: Dataset = self._create_dummy_dataset()
        dset.add_faiss_index_from_external_arrays(
            external_arrays=np.ones((30, 5)) * np.arange(30).reshape(-1, 1), index_name="vecs"
        )
        dset.drop_index("vecs")
        self.assertRaises(MissingIndex, partial(dset.get_nearest_examples, "vecs2", np.ones(5, dtype=np.float32)))

    @require_elasticsearch
    def test_add_elasticsearch_index(self):
        from elasticsearch import Elasticsearch

        dset: Dataset = self._create_dummy_dataset()
        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            mocked_index_create.return_value = {"acknowledged": True}
            mocked_bulk.return_value([(True, None)] * 30)
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 29}]}}
            es_client = Elasticsearch()
            dset.add_elasticsearch_index("filename", es_client=es_client)
            scores, examples = dset.get_nearest_examples("filename", "my_name-train_29")
            self.assertEqual(examples["filename"][0], "my_name-train_29")
@require_faiss
class FaissIndexTestCase(TestCase):
    def test_flat_ip(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)

        # add vectors
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsNotNone(index.faiss_index)
        self.assertEqual(index.faiss_index.ntotal, 5)
        index.add_vectors(np.zeros((5, 5), dtype=np.float32))
        self.assertEqual(index.faiss_index.ntotal, 10)

        # single query
        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertRaises(ValueError, index.search, query.reshape(-1, 1))
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)

        # batched queries
        queries = np.eye(5, dtype=np.float32)[::-1]
        total_scores, total_indices = index.search_batch(queries)
        self.assertRaises(ValueError, index.search_batch, queries[0])
        best_scores = [scores[0] for scores in total_scores]
        best_indices = [indices[0] for indices in total_indices]
        self.assertGreater(np.min(best_scores), 0)
        self.assertListEqual([4, 3, 2, 1, 0], best_indices)

    def test_factory(self):
        import faiss

        index = FaissIndex(string_factory="Flat")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)
        index = FaissIndex(string_factory="LSH")
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexLSH)
        with self.assertRaises(ValueError):
            FaissIndex(string_factory="Flat", custom_index=faiss.IndexFlat(5))

    def test_custom(self):
        import faiss

        custom_index = faiss.IndexFlat(5)
        index = FaissIndex(custom_index=custom_index)
        index.add_vectors(np.eye(5, dtype=np.float32))
        self.assertIsInstance(index.faiss_index, faiss.IndexFlat)

    def test_serialization(self):
        import faiss

        index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
        index.add_vectors(np.eye(5, dtype=np.float32))

        # Setting delete=False and unlinking manually is not pretty... but it is required on Windows to
        # ensure somewhat stable behaviour. If we don't, we get PermissionErrors. This is an age-old issue.
        # see https://bugs.python.org/issue14243 and
        # https://stackoverflow.com/questions/23212435/permission-denied-to-write-to-my-temporary-file/23212515
        with tempfile.NamedTemporaryFile(delete=False) as tmp_file:
            index.save(tmp_file.name)
            index = FaissIndex.load(tmp_file.name)
        os.unlink(tmp_file.name)

        query = np.zeros(5, dtype=np.float32)
        query[1] = 1
        scores, indices = index.search(query)
        self.assertGreater(scores[0], 0)
        self.assertEqual(indices[0], 1)
@require_faiss
def test_serialization_fs(mockfs):
    """Round-trip a FaissIndex through an fsspec filesystem (the `mockfs` fixture)."""
    import faiss

    index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
    index.add_vectors(np.eye(5, dtype=np.float32))

    index_name = "index.faiss"
    path = f"mock://{index_name}"
    index.save(path, storage_options=mockfs.storage_options)
    index = FaissIndex.load(path, storage_options=mockfs.storage_options)

    query = np.zeros(5, dtype=np.float32)
    query[1] = 1
    scores, indices = index.search(query)
    assert scores[0] > 0
    assert indices[0] == 1
@require_elasticsearch
class ElasticSearchIndexTestCase(TestCase):
    def test_elasticsearch(self):
        from elasticsearch import Elasticsearch

        with patch("elasticsearch.Elasticsearch.search") as mocked_search, patch(
            "elasticsearch.client.IndicesClient.create"
        ) as mocked_index_create, patch("elasticsearch.helpers.streaming_bulk") as mocked_bulk:
            es_client = Elasticsearch()
            mocked_index_create.return_value = {"acknowledged": True}
            index = ElasticSearchIndex(es_client=es_client)
            mocked_bulk.return_value([(True, None)] * 3)
            index.add_documents(["foo", "bar", "foobar"])

            # single query
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # single query with timeout
            query = "foo"
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 0}]}}
            scores, indices = index.search(query, request_timeout=30)
            self.assertEqual(scores[0], 1)
            self.assertEqual(indices[0], 0)

            # batched queries
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices)

            # batched queries with timeout
            queries = ["foo", "bar", "foobar"]
            mocked_search.return_value = {"hits": {"hits": [{"_score": 1, "_id": 1}]}}
            total_scores, total_indices = index.search_batch(queries, request_timeout=30)
            best_scores = [scores[0] for scores in total_scores]
            best_indices = [indices[0] for indices in total_indices]
            self.assertGreater(np.min(best_scores), 0)
            self.assertListEqual([1, 1, 1], best_indices) | 331 | 0 |
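A minimal end-to-end sketch of the `FaissIndex` flow these tests exercise (assumes `datasets`, `numpy`, and `faiss-cpu` are installed). With the inner-product metric over one-hot rows, a query with `query[1] = 1` scores highest against row 1:

import numpy as np
import faiss
from datasets.search import FaissIndex

index = FaissIndex(metric_type=faiss.METRIC_INNER_PRODUCT)
index.add_vectors(np.eye(5, dtype=np.float32))   # five one-hot vectors

query = np.zeros(5, dtype=np.float32)
query[1] = 1
scores, indices = index.search(query)
print(scores[0], indices[0])  # highest score at index 1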