import gc
import random
import unittest

import numpy as np
import torch
from transformers import CLIPImageProcessor, CLIPVisionConfig, CLIPVisionModel

from diffusers import HeunDiscreteScheduler, PriorTransformer, ShapEImg2ImgPipeline
from diffusers.pipelines.shap_e import ShapERenderer
from diffusers.utils import floats_tensor, load_image, load_numpy, slow
from diffusers.utils.testing_utils import require_torch_gpu, torch_device

from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
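
# Fast CPU tests for the Shap-E image-to-image pipeline: tiny, randomly
# initialized submodules (CLIP vision encoder, prior transformer, NeRF
# renderer) are wired together so a single denoising step can be checked
# end to end without downloading real weights.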
class ShapEImg2ImgPipelineFastTests(PipelineTesterMixin, unittest.TestCase):
    pipeline_class = ShapEImg2ImgPipeline
    params = ["image"]
    batch_params = ["image"]
    required_optional_params = [
        "num_images_per_prompt",
        "num_inference_steps",
        "generator",
        "latents",
        "guidance_scale",
        "frame_size",
        "output_type",
        "return_dict",
    ]
    test_gpu_offload = False
    @property
    def text_embedder_hidden_size(self):
        return 32

    @property
    def time_input_dim(self):
        return 32

    @property
    def time_embed_dim(self):
        return self.time_input_dim * 4

    @property
    def renderer_dim(self):
        return 8
    @property
    def dummy_image_encoder(self):
        torch.manual_seed(0)
        config = CLIPVisionConfig(
            hidden_size=self.text_embedder_hidden_size,
            image_size=64,
            projection_dim=self.text_embedder_hidden_size,
            intermediate_size=37,
            num_attention_heads=4,
            num_channels=3,
            num_hidden_layers=5,
            patch_size=1,
        )
        model = CLIPVisionModel(config)
        return model
    @property
    def dummy_image_processor(self):
        image_processor = CLIPImageProcessor(
            crop_size=224,
            do_center_crop=True,
            do_normalize=True,
            do_resize=True,
            image_mean=[0.48145466, 0.4578275, 0.40821073],
            image_std=[0.26862954, 0.26130258, 0.27577711],
            resample=3,
            size=224,
        )
        return image_processor
    @property
    def dummy_prior(self):
        torch.manual_seed(0)
        model_kwargs = {
            "num_attention_heads": 2,
            "attention_head_dim": 16,
            "embedding_dim": self.time_input_dim,
            "num_embeddings": 32,
            "embedding_proj_dim": self.text_embedder_hidden_size,
            "time_embed_dim": self.time_embed_dim,
            "num_layers": 1,
            "clip_embed_dim": self.time_input_dim * 2,
            "additional_embeddings": 0,
            "time_embed_act_fn": "gelu",
            "norm_in_type": "layer",
            "embedding_proj_norm_type": "layer",
            "encoder_hid_proj_type": None,
            "added_emb_type": None,
        }
        model = PriorTransformer(**model_kwargs)
        return model
    @property
    def dummy_renderer(self):
        torch.manual_seed(0)
        model_kwargs = {
            "param_shapes": (
                (self.renderer_dim, 93),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
                (self.renderer_dim, 8),
            ),
            "d_latent": self.time_input_dim,
            "d_hidden": self.renderer_dim,
            "n_output": 12,
            "background": (
                0.1,
                0.1,
                0.1,
            ),
        }
        model = ShapERenderer(**model_kwargs)
        return model
    def get_dummy_components(self):
        prior = self.dummy_prior
        image_encoder = self.dummy_image_encoder
        image_processor = self.dummy_image_processor
        renderer = self.dummy_renderer
        scheduler = HeunDiscreteScheduler(
            beta_schedule="exp",
            num_train_timesteps=1024,
            prediction_type="sample",
            use_karras_sigmas=True,
            clip_sample=True,
            clip_sample_range=1.0,
        )
        components = {
            "prior": prior,
            "image_encoder": image_encoder,
            "image_processor": image_processor,
            "renderer": renderer,
            "scheduler": scheduler,
        }
        return components
    def get_dummy_inputs(self, device, seed=0):
        input_image = floats_tensor((1, 3, 64, 64), rng=random.Random(seed)).to(device)
        if str(device).startswith("mps"):
            generator = torch.manual_seed(seed)
        else:
            generator = torch.Generator(device=device).manual_seed(seed)
        inputs = {
            "image": input_image,
            "generator": generator,
            "num_inference_steps": 1,
            "frame_size": 32,
            "output_type": "np",
        }
        return inputs
    def test_shap_e(self):
        device = "cpu"
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(device)
        pipe.set_progress_bar_config(disable=None)

        output = pipe(**self.get_dummy_inputs(device))
        image = output.images[0]
        image_slice = image[0, -3:, -3:, -1]

        assert image.shape == (20, 32, 32, 3)

        expected_slice = np.array(
            [
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
                0.00039216,
            ]
        )
        assert np.abs(image_slice.flatten() - expected_slice).max() < 1e-2
    def test_inference_batch_consistent(self):
        self._test_inference_batch_consistent(batch_sizes=[1, 2])

    def test_inference_batch_single_identical(self):
        test_max_difference = torch_device == "cpu"
        relax_max_difference = True
        self._test_inference_batch_single_identical(
            batch_size=2,
            test_max_difference=test_max_difference,
            relax_max_difference=relax_max_difference,
        )
    def test_num_images_per_prompt(self):
        components = self.get_dummy_components()
        pipe = self.pipeline_class(**components)
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        batch_size = 1
        num_images_per_prompt = 2
        inputs = self.get_dummy_inputs(torch_device)
        for key in inputs.keys():
            if key in self.batch_params:
                inputs[key] = batch_size * [inputs[key]]

        images = pipe(**inputs, num_images_per_prompt=num_images_per_prompt)[0]
        assert images.shape[0] == batch_size * num_images_per_prompt
@slow
@require_torch_gpu
class ShapEImg2ImgPipelineIntegrationTests(unittest.TestCase):
    def tearDown(self):
        # clean up the VRAM after each test
        super().tearDown()
        gc.collect()
        torch.cuda.empty_cache()

    def test_shap_e_img2img(self):
        input_image = load_image(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main" "/shap_e/corgi.png"
        )
        expected_image = load_numpy(
            "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"
            "/shap_e/test_shap_e_img2img_out.npy"
        )
        pipe = ShapEImg2ImgPipeline.from_pretrained("openai/shap-e-img2img")
        pipe = pipe.to(torch_device)
        pipe.set_progress_bar_config(disable=None)

        generator = torch.Generator(device=torch_device).manual_seed(0)
        images = pipe(
            input_image,
            generator=generator,
            guidance_scale=3.0,
            num_inference_steps=64,
            frame_size=64,
            output_type="np",
        ).images[0]

        assert images.shape == (20, 64, 64, 3)
        assert_mean_pixel_difference(images, expected_image)
from typing import Dict, List, Optional, Union

import numpy as np

from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
    center_crop,
    convert_to_rgb,
    get_resize_output_image_size,
    normalize,
    rescale,
    resize,
    to_channel_dimension_format,
)
from ...image_utils import (
    OPENAI_CLIP_MEAN,
    OPENAI_CLIP_STD,
    ChannelDimension,
    ImageInput,
    PILImageResampling,
    make_list_of_images,
    to_numpy_array,
    valid_images,
)
from ...utils import TensorType, is_vision_available, logging

logger = logging.get_logger(__name__)

if is_vision_available():
    import PIL
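
# CLIP-style preprocessing pipeline: resize the shortest edge, center-crop,
# rescale to [0, 1], then normalize with the OpenAI CLIP mean/std. Every step
# can be toggled with the corresponding `do_*` flag.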
class CLIPImageProcessor(BaseImageProcessor):
    model_input_names = ["pixel_values"]

    def __init__(
        self,
        do_resize: bool = True,
        size: Dict[str, int] = None,
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        do_center_crop: bool = True,
        crop_size: Dict[str, int] = None,
        do_rescale: bool = True,
        rescale_factor: Union[int, float] = 1 / 255,
        do_normalize: bool = True,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = True,
        **kwargs,
    ) -> None:
        super().__init__(**kwargs)
        size = size if size is not None else {"shortest_edge": 224}
        size = get_size_dict(size, default_to_square=False)
        crop_size = crop_size if crop_size is not None else {"height": 224, "width": 224}
        crop_size = get_size_dict(crop_size, default_to_square=True, param_name="crop_size")

        self.do_resize = do_resize
        self.size = size
        self.resample = resample
        self.do_center_crop = do_center_crop
        self.crop_size = crop_size
        self.do_rescale = do_rescale
        self.rescale_factor = rescale_factor
        self.do_normalize = do_normalize
        self.image_mean = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
        self.image_std = image_std if image_std is not None else OPENAI_CLIP_STD
        self.do_convert_rgb = do_convert_rgb
    def resize(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        resample: PILImageResampling = PILImageResampling.BICUBIC,
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size, default_to_square=False)
        if "shortest_edge" not in size:
            raise ValueError(f"The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}")
        output_size = get_resize_output_image_size(image, size=size["shortest_edge"], default_to_square=False)
        return resize(image, size=output_size, resample=resample, data_format=data_format, **kwargs)

    def center_crop(
        self,
        image: np.ndarray,
        size: Dict[str, int],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        size = get_size_dict(size)
        if "height" not in size or "width" not in size:
            raise ValueError(f"The `size` parameter must contain the keys (height, width). Got {size.keys()}")
        return center_crop(image, size=(size["height"], size["width"]), data_format=data_format, **kwargs)

    def rescale(
        self,
        image: np.ndarray,
        scale: Union[int, float],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return rescale(image, scale=scale, data_format=data_format, **kwargs)

    def normalize(
        self,
        image: np.ndarray,
        mean: Union[float, List[float]],
        std: Union[float, List[float]],
        data_format: Optional[Union[str, ChannelDimension]] = None,
        **kwargs,
    ) -> np.ndarray:
        return normalize(image, mean=mean, std=std, data_format=data_format, **kwargs)
    def preprocess(
        self,
        images: ImageInput,
        do_resize: bool = None,
        size: Dict[str, int] = None,
        resample: PILImageResampling = None,
        do_center_crop: bool = None,
        crop_size: int = None,
        do_rescale: bool = None,
        rescale_factor: float = None,
        do_normalize: bool = None,
        image_mean: Optional[Union[float, List[float]]] = None,
        image_std: Optional[Union[float, List[float]]] = None,
        do_convert_rgb: bool = None,
        return_tensors: Optional[Union[str, TensorType]] = None,
        data_format: Optional[ChannelDimension] = ChannelDimension.FIRST,
        **kwargs,
    ):
        do_resize = do_resize if do_resize is not None else self.do_resize
        size = size if size is not None else self.size
        size = get_size_dict(size, param_name="size", default_to_square=False)
        resample = resample if resample is not None else self.resample
        do_center_crop = do_center_crop if do_center_crop is not None else self.do_center_crop
        crop_size = crop_size if crop_size is not None else self.crop_size
        crop_size = get_size_dict(crop_size, param_name="crop_size", default_to_square=True)
        do_rescale = do_rescale if do_rescale is not None else self.do_rescale
        rescale_factor = rescale_factor if rescale_factor is not None else self.rescale_factor
        do_normalize = do_normalize if do_normalize is not None else self.do_normalize
        image_mean = image_mean if image_mean is not None else self.image_mean
        image_std = image_std if image_std is not None else self.image_std
        do_convert_rgb = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb

        images = make_list_of_images(images)

        if not valid_images(images):
            raise ValueError(
                "Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, "
                "torch.Tensor, tf.Tensor or jax.ndarray."
            )
        if do_resize and size is None:
            raise ValueError("Size must be specified if do_resize is True.")
        if do_center_crop and crop_size is None:
            raise ValueError("Crop size must be specified if do_center_crop is True.")
        if do_rescale and rescale_factor is None:
            raise ValueError("Rescale factor must be specified if do_rescale is True.")
        if do_normalize and (image_mean is None or image_std is None):
            raise ValueError("Image mean and std must be specified if do_normalize is True.")

        # PIL RGBA images are converted to RGB
        if do_convert_rgb:
            images = [convert_to_rgb(image) for image in images]

        # All transformations expect numpy arrays.
        images = [to_numpy_array(image) for image in images]

        if do_resize:
            images = [self.resize(image=image, size=size, resample=resample) for image in images]
        if do_center_crop:
            images = [self.center_crop(image=image, size=crop_size) for image in images]
        if do_rescale:
            images = [self.rescale(image=image, scale=rescale_factor) for image in images]
        if do_normalize:
            images = [self.normalize(image=image, mean=image_mean, std=image_std) for image in images]

        images = [to_channel_dimension_format(image, data_format) for image in images]
        data = {"pixel_values": images}
        return BatchFeature(data=data, tensor_type=return_tensors)
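
# Minimal usage sketch (the file name is hypothetical):
#   image_processor = CLIPImageProcessor()
#   batch = image_processor(PIL.Image.open("cat.png"), return_tensors="np")
#   batch["pixel_values"].shape  # (1, 3, 224, 224) with the defaults above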
"""
Project Euler Problem 493: https://projecteuler.net/problem=493

An urn contains 70 balls, 10 for each of the seven rainbow colours, and 20
balls are drawn at random. By linearity of expectation, the expected number
of distinct colours drawn is NUM_COLOURS * (1 - C(60, 20) / C(70, 20)).
"""
import math

BALLS_PER_COLOUR = 10
NUM_COLOURS = 7
NUM_BALLS = BALLS_PER_COLOUR * NUM_COLOURS


def solution(num_picked: int = 20) -> str:
    """Return the expected number of distinct colours, to nine decimal places."""
    total = math.comb(NUM_BALLS, num_picked)
    missing_colour = math.comb(NUM_BALLS - BALLS_PER_COLOUR, num_picked)
    result = NUM_COLOURS * (1 - missing_colour / total)
    return f"{result:.9f}"


if __name__ == "__main__":
    print(solution(20))
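
# Sanity check of the formula with tiny numbers: 2 colours, 1 ball each, 1 ball
# drawn gives missing_colour / total = C(1, 1) / C(2, 1) = 1 / 2, so the
# expected number of colours seen is 2 * (1 - 1/2) = 1, as it must be when
# exactly one ball is drawn.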
import warnings

from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor

logger = logging.get_logger(__name__)


class OwlViTFeatureExtractor(OwlViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"
            " use OwlViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
# Copyright 2021 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
from accelerate.utils import ComputeEnvironment
from .cluster import get_cluster_input
from .config_args import cache_dir, default_config_file, default_yaml_config_file, load_config_from_file # noqa: F401
from .config_utils import _ask_field, _ask_options, _convert_compute_environment # noqa: F401
from .sagemaker import get_sagemaker_input
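
# Flow of `accelerate config`: ask which compute environment is in use,
# delegate to the SageMaker or cluster questionnaire, then serialize the
# answers to a YAML (or JSON) config file.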
description = "Launches a series of prompts to create and save a `default_config.yaml` configuration file for your training system. Should always be ran first on your machine"


def get_user_input():
    compute_environment = _ask_options(
        "In which compute environment are you running?",
        ["This machine", "AWS (Amazon SageMaker)"],
        _convert_compute_environment,
    )
    if compute_environment == ComputeEnvironment.AMAZON_SAGEMAKER:
        config = get_sagemaker_input()
    else:
        config = get_cluster_input()
    return config


def config_command_parser(subparsers=None):
    if subparsers is not None:
        parser = subparsers.add_parser("config", description=description)
    else:
        parser = argparse.ArgumentParser("Accelerate config command", description=description)

    parser.add_argument(
        "--config_file",
        default=None,
        help=(
            "The path to use to store the config file. Will default to a file named default_config.yaml in the cache "
            "location, which is the content of the environment `HF_HOME` suffixed with 'accelerate', or if you don't have "
            "such an environment variable, your cache directory ('~/.cache' or the content of `XDG_CACHE_HOME`) suffixed "
            "with 'huggingface'."
        ),
    )

    if subparsers is not None:
        parser.set_defaults(func=config_command)
    return parser


def config_command(args):
    config = get_user_input()
    if args.config_file is not None:
        config_file = args.config_file
    else:
        if not os.path.isdir(cache_dir):
            os.makedirs(cache_dir)
        config_file = default_yaml_config_file

    if config_file.endswith(".json"):
        config.to_json_file(config_file)
    else:
        config.to_yaml_file(config_file)
    print(f"accelerate configuration saved at {config_file}")


def main():
    parser = config_command_parser()
    args = parser.parse_args()
    config_command(args)


if __name__ == "__main__":
    main()
"""Find roots of polynomial and transcendental functions with the (modified)
Newton-Raphson method."""
from sympy import diff, lambdify, symbols
from sympy.functions import *  # noqa: F403


def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
) -> complex:
    """Iterate x_next = x - multiplicity * f(x) / f'(x) until two consecutive
    guesses agree to within `precision`."""
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))

    prev_guess = starting_point

    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess
            )
        else:
            raise ZeroDivisionError("Could not find root") from None

        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess

        prev_guess = next_guess
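
# Worked example of one Newton step for f(x) = x**2 - 4 starting at x0 = 3:
#   x1 = 3 - (3**2 - 4) / (2 * 3) = 3 - 5 / 6 = 2.1666...
# Subsequent steps converge quadratically to the root x = 2.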
# Let's Execute
if __name__ == "__main__":
    # Find root of trigonometric function
    # Find value of pi
    print(f"The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}")

    # Find root of polynomial
    # Find fourth Root of 5
    print(f"The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 + 5j)}")

    # Find value of e
    print(
        "The root of log(y) - 1 = 0 is ",
        f"{newton_raphson('log(y) - 1', 2, variable='y')}",
    )

    # Exponential Roots
    print(
        "The root of exp(x) - 1 = 0 is",
        f"{newton_raphson('exp(x) - 1', 10, precision=0.005)}",
    )

    # Find root of cos(x)
    print(f"The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}")
"""Combination Sum IV: count the ordered ways to reach `target` using elements
of `array`, with repetition allowed."""


def combination_sum_iv(n: int, array: list[int], target: int) -> int:
    """Naive recursion over all sub-targets."""

    def count_of_possible_combinations(target: int) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        return sum(count_of_possible_combinations(target - item) for item in array)

    return count_of_possible_combinations(target)
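
# The plain recursion above re-solves the same sub-targets exponentially often;
# the memoized variant below caches each sub-result in dp_array, and the
# bottom-up variant after it fills the same table iteratively. Both run in
# O(target * len(array)).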
def combination_sum_iv_dp_array(n: int, array: list[int], target: int) -> int:
    """Top-down dynamic programming: memoize the recursion in dp_array."""

    def count_of_possible_combinations_with_dp_array(
        target: int, dp_array: list[int]
    ) -> int:
        if target < 0:
            return 0
        if target == 0:
            return 1
        if dp_array[target] != -1:
            return dp_array[target]
        answer = sum(
            count_of_possible_combinations_with_dp_array(target - item, dp_array)
            for item in array
        )
        dp_array[target] = answer
        return answer

    dp_array = [-1] * (target + 1)
    return count_of_possible_combinations_with_dp_array(target, dp_array)
def combination_sum_iv_bottom_up(n: int, array: list[int], target: int) -> int:
    """Bottom-up dynamic programming over targets 0..target."""
    dp_array = [0] * (target + 1)
    dp_array[0] = 1
    for i in range(1, target + 1):
        for j in range(n):
            if i - array[j] >= 0:
                dp_array[i] += dp_array[i - array[j]]
    return dp_array[target]
if __name__ == "__main__":
    import doctest

    doctest.testmod()
    n = 3
    target = 5
    array = [1, 2, 5]
    print(combination_sum_iv(n, array, target))
import json
import os
from collections import Counter

import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset

POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
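
# POOLING_BREAKDOWN maps the requested number of image embeddings to an
# adaptive-pool output grid whose cells multiply to that number
# (e.g. 4 -> 2x2, 6 -> 3x2), so each grid cell becomes one 2048-d embedding.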
class ImageEncoder(nn.Module):
    def __init__(self, args):
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True)
        modules = list(model.children())[:-2]
        self.model = nn.Sequential(*modules)
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds])

    def forward(self, x):
        # Bx3x224x224 -> Bx2048x7x7 -> Bx2048xN -> BxNx2048
        out = self.pool(self.model(x))
        out = torch.flatten(out, start_dim=2)
        out = out.transpose(1, 2).contiguous()
        return out  # BxNx2048
class JsonlDataset(Dataset):
    def __init__(self, data_path, tokenizer, transforms, labels, max_seq_length):
        self.data = [json.loads(l) for l in open(data_path)]
        self.data_dir = os.path.dirname(data_path)
        self.tokenizer = tokenizer
        self.labels = labels
        self.n_classes = len(labels)
        self.max_seq_length = max_seq_length
        self.transforms = transforms

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sentence = torch.LongTensor(self.tokenizer.encode(self.data[index]["text"], add_special_tokens=True))
        start_token, sentence, end_token = sentence[0], sentence[1:-1], sentence[-1]
        sentence = sentence[: self.max_seq_length]

        label = torch.zeros(self.n_classes)
        label[[self.labels.index(tgt) for tgt in self.data[index]["label"]]] = 1

        image = Image.open(os.path.join(self.data_dir, self.data[index]["img"])).convert("RGB")
        image = self.transforms(image)

        return {
            "image_start_token": start_token,
            "image_end_token": end_token,
            "sentence": sentence,
            "image": image,
            "label": label,
        }

    def get_label_frequencies(self):
        label_freqs = Counter()
        for row in self.data:
            label_freqs.update(row["label"])
        return label_freqs
def collate_fn(batch):
    lens = [len(row["sentence"]) for row in batch]
    bsz, max_seq_len = len(batch), max(lens)

    mask_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)
    text_tensor = torch.zeros(bsz, max_seq_len, dtype=torch.long)

    for i_batch, (input_row, length) in enumerate(zip(batch, lens)):
        text_tensor[i_batch, :length] = input_row["sentence"]
        mask_tensor[i_batch, :length] = 1

    img_tensor = torch.stack([row["image"] for row in batch])
    tgt_tensor = torch.stack([row["label"] for row in batch])
    img_start_token = torch.stack([row["image_start_token"] for row in batch])
    img_end_token = torch.stack([row["image_end_token"] for row in batch])

    return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def get_mmimdb_labels():
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def get_image_transforms():
    return transforms.Compose(
        [
            transforms.Resize(256),
            transforms.CenterCrop(224),
            transforms.ToTensor(),
            transforms.Normalize(
                mean=[0.46777044, 0.44531429, 0.40661017],
                std=[0.12221994, 0.12145835, 0.14380469],
            ),
        ]
    )
"""Atbash cipher: every letter is mapped to its mirror in the alphabet
(A <-> Z, B <-> Y, ...)."""
import string


def atbash_slow(sequence: str) -> str:
    output = ""
    for i in sequence:
        extract = ord(i)
        if 65 <= extract <= 90:
            output += chr(155 - extract)
        elif 97 <= extract <= 122:
            output += chr(219 - extract)
        else:
            output += i
    return output
def atbash(sequence: str) -> str:
    letters = string.ascii_letters
    letters_reversed = string.ascii_lowercase[::-1] + string.ascii_uppercase[::-1]
    return "".join(
        letters_reversed[letters.index(c)] if c in letters else c for c in sequence
    )
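
# Example: atbash("Hello") == "Svool" - 'H' is the 8th letter from 'A', and
# 'S' is the 8th letter counting back from 'Z'; non-letters pass through.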
def benchmark() -> None:
    """Benchmark the two implementations side by side."""
    from timeit import timeit

    print("Running performance benchmarks...")
    setup = "from string import printable ; from __main__ import atbash, atbash_slow"
    print(f"> atbash_slow(): {timeit('atbash_slow(printable)', setup=setup)} seconds")
    print(f"> atbash(): {timeit('atbash(printable)', setup=setup)} seconds")
if __name__ == "__main__":
    for example in ("ABCDEFGH", "123GGjj", "testStringtest", "with space"):
        print(f"{example} encrypted in atbash: {atbash(example)}")
    benchmark()
"""Sieve of Eratosthenes."""
from __future__ import annotations

import math


def prime_sieve(num: int) -> list[int]:
    """Return the list of all primes not exceeding num."""
    if num <= 0:
        msg = f"{num}: Invalid input, please enter a positive integer."
        raise ValueError(msg)

    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))

    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start, num + 1, start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1

    for j in range(end + 1, num + 1):
        if sieve[j] is True:
            prime.append(j)

    return prime
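
# Example: prime_sieve(25) == [2, 3, 5, 7, 11, 13, 17, 19, 23]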
if __name__ == "__main__":
    print(prime_sieve(int(input("Enter a positive integer: ").strip())))
"""Playfair cipher: encrypts pairs of letters (digraphs) using a 5x5 key table."""
import itertools
import string
from collections.abc import Generator, Iterable


def chunker(seq: Iterable[str], size: int) -> Generator[tuple[str, ...], None, None]:
    it = iter(seq)
    while True:
        chunk = tuple(itertools.islice(it, size))
        if not chunk:
            return
        yield chunk


def prepare_input(dirty: str) -> str:
    """Uppercase the plaintext, drop non-letters, and insert X between repeated
    letters (and at the end, if needed) so every digraph has two distinct slots."""
    dirty = "".join([c.upper() for c in dirty if c in string.ascii_letters])
    clean = ""

    if len(dirty) < 2:
        return dirty

    for i in range(len(dirty) - 1):
        clean += dirty[i]
        if dirty[i] == dirty[i + 1]:
            clean += "X"

    clean += dirty[-1]

    if len(clean) & 1:
        clean += "X"

    return clean
def generate_table(key: str) -> list[str]:
    # I and J are used interchangeably to allow
    # us to use a 5x5 table (25 letters)
    alphabet = "ABCDEFGHIKLMNOPQRSTUVWXYZ"
    # we're using a list instead of a '2d' array because it makes the math
    # for setting up the table and doing the actual encoding/decoding simpler
    table = []

    # copy key chars into the table if they are in `alphabet` ignoring duplicates
    for char in key.upper():
        if char not in table and char in alphabet:
            table.append(char)

    # fill the rest of the table in with the remaining alphabet chars
    for char in alphabet:
        if char not in table:
            table.append(char)

    return table
def encode(plaintext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = prepare_input(plaintext)
    ciphertext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(plaintext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            ciphertext += table[row1 * 5 + (col1 + 1) % 5]
            ciphertext += table[row2 * 5 + (col2 + 1) % 5]
        elif col1 == col2:
            ciphertext += table[((row1 + 1) % 5) * 5 + col1]
            ciphertext += table[((row2 + 1) % 5) * 5 + col2]
        else:  # rectangle
            ciphertext += table[row1 * 5 + col2]
            ciphertext += table[row2 * 5 + col1]

    return ciphertext
def decode(ciphertext: str, key: str) -> str:
    table = generate_table(key)
    plaintext = ""

    # https://en.wikipedia.org/wiki/Playfair_cipher#Description
    for char1, char2 in chunker(ciphertext, 2):
        row1, col1 = divmod(table.index(char1), 5)
        row2, col2 = divmod(table.index(char2), 5)

        if row1 == row2:
            plaintext += table[row1 * 5 + (col1 - 1) % 5]
            plaintext += table[row2 * 5 + (col2 - 1) % 5]
        elif col1 == col2:
            plaintext += table[((row1 - 1) % 5) * 5 + col1]
            plaintext += table[((row2 - 1) % 5) * 5 + col2]
        else:  # rectangle
            plaintext += table[row1 * 5 + col2]
            plaintext += table[row2 * 5 + col1]

    return plaintext
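
# Round-trip example (the key is arbitrary):
#   decode(encode("instruments", "MONARCHY"), "MONARCHY") == "INSTRUMENTSX"
# The trailing X is the padding prepare_input adds for odd-length input.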
import argparse
import logging
import os
import time
import timeit

import datasets
import numpy as np
import pycuda.autoinit  # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions

import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate

TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)

logger = logging.getLogger(__name__)

parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()

if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
    raise ValueError(
        "You are instantiating a new tokenizer from scratch. This is not supported by this script."
        "You can do it from another script, save it, and load it from here, using --tokenizer_name."
    )

logger.info("Training/evaluation parameters %s", args)

args.eval_batch_size = args.per_device_eval_batch_size

INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)

# TRT Engine properties
STRICT_TYPES = True

engine_name = "temp_engine/bert-fp32.engine"
if args.fp16:
    engine_name = "temp_engine/bert-fp16.engine"
if args.int8:
    engine_name = "temp_engine/bert-int8.engine"
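
# The precision mode is baked into the serialized engine at build time, so the
# file name records whether this engine was built for FP32, FP16 or INT8.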
# import ONNX file
if not os.path.exists("temp_engine"):
    os.makedirs("temp_engine")

EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
    network, TRT_LOGGER
) as parser:
    with open(args.onnx_model_path, "rb") as model:
        if not parser.parse(model.read()):
            for error in range(parser.num_errors):
                print(parser.get_error(error))

    # Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]

    with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)

        # serialize_engine and store in file (can be directly loaded and deserialized):
        with open(engine_name, "wb") as f:
            f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)

    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
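
# All three phases (host-to-device copies, kernel execution, device-to-host
# copies) are enqueued on the same CUDA stream, so the wall-clock window above
# brackets the full copy-compute-copy round trip for one batch.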
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
accelerator = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()

# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError("Evaluation requires a dataset name")
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets["validation"].column_names

question_column_name = "question" if "question" in column_names else column_names[0]
context_column_name = "context" if "context" in column_names else column_names[1]
answer_column_name = "answers" if "answers" in column_names else column_names[2]

# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == "right"
if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"
        f"model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."
    )

max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]

    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )

    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")

    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []

    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0

        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])

        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]

    return tokenized_examples
eval_examples = raw_datasets["validation"]
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc="Running tokenizer on validation dataset",
)

data_collator = default_data_collator

eval_dataset_for_model = eval_dataset.remove_columns(["example_id", "offset_mapping"])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]

    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric("squad_v2" if args.version_2_with_negative else "squad")
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
    f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified

    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize

    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]

    # Allocate output buffers (start and end logits live in pinned host memory).
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)

    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
    # Evaluation
    logger.info("***** Running Evaluation *****")
    logger.info(f"  Num examples = {len(eval_dataset)}")
    logger.info(f"  Batch size = {args.per_device_eval_batch_size}")

    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()

    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1

        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)

        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)

        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)

    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))

    evalTime = timeit.default_timer() - start_time
    logger.info("  Evaluation done in total %f secs (%f sec per example)", evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info("Average Inference Time = {:.3f} ms".format(total_time * 1000 / niter))
    logger.info("Total Inference Time = {:.3f} ms".format(total_time * 1000))
    logger.info("Total Number of Inference = %d", niter)

prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"Evaluation metrics: {eval_metric}")
class PrefixSum:
    def __init__(self, array: list[int]) -> None:
        len_array = len(array)
        self.prefix_sum = [0] * len_array
        if len_array > 0:
            self.prefix_sum[0] = array[0]
        for i in range(1, len_array):
            self.prefix_sum[i] = self.prefix_sum[i - 1] + array[i]

    def get_sum(self, start: int, end: int) -> int:
        """Sum of array[start..end] (inclusive) in O(1)."""
        if start == 0:
            return self.prefix_sum[end]
        return self.prefix_sum[end] - self.prefix_sum[start - 1]
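
    # Example: for array [1, 2, 3] the prefix sums are [1, 3, 6], so
    # get_sum(1, 2) == 6 - 1 == 5.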
    def contains_sum(self, target_sum: int) -> bool:
        """Return True if some contiguous subarray sums to target_sum."""
        sums = {0}
        for sum_item in self.prefix_sum:
            if sum_item - target_sum in sums:
                return True
            sums.add(sum_item)
        return False
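
    # contains_sum relies on the identity sum(array[i+1 : j+1]) ==
    # prefix_sum[j] - prefix_sum[i]: a subarray with the target sum exists
    # exactly when prefix_sum[j] - target_sum was already seen (0 is seeded
    # into `sums` to cover subarrays that start at index 0).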
if __name__ == "__main__":
    import doctest

    doctest.testmod()
from typing import TYPE_CHECKING

from ...utils import (
    OptionalDependencyNotAvailable,
    _LazyModule,
    is_tf_available,
    is_torch_available,
    is_vision_available,
)

_import_structure = {
    "configuration_convnext": ["CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP", "ConvNextConfig", "ConvNextOnnxConfig"]
}
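
# Standard transformers lazy-import layout: `_import_structure` maps submodule
# names to their public symbols, and the heavy imports only happen under
# TYPE_CHECKING or on first attribute access through `_LazyModule`.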
try:
    if not is_vision_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["feature_extraction_convnext"] = ["ConvNextFeatureExtractor"]
    _import_structure["image_processing_convnext"] = ["ConvNextImageProcessor"]

try:
    if not is_torch_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_convnext"] = [
        "CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST",
        "ConvNextForImageClassification",
        "ConvNextModel",
        "ConvNextPreTrainedModel",
        "ConvNextBackbone",
    ]

try:
    if not is_tf_available():
        raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
    pass
else:
    _import_structure["modeling_tf_convnext"] = [
        "TFConvNextForImageClassification",
        "TFConvNextModel",
        "TFConvNextPreTrainedModel",
    ]

if TYPE_CHECKING:
    from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig

    try:
        if not is_vision_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .feature_extraction_convnext import ConvNextFeatureExtractor
        from .image_processing_convnext import ConvNextImageProcessor

    try:
        if not is_torch_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_convnext import (
            CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
            ConvNextBackbone,
            ConvNextForImageClassification,
            ConvNextModel,
            ConvNextPreTrainedModel,
        )

    try:
        if not is_tf_available():
            raise OptionalDependencyNotAvailable()
    except OptionalDependencyNotAvailable:
        pass
    else:
        from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel

else:
    import sys

    sys.modules[__name__] = _LazyModule(__name__, globals()["__file__"], _import_structure)
import warnings

from ...utils import logging
from .image_processing_mobilevit import MobileViTImageProcessor

logger = logging.get_logger(__name__)


class MobileViTFeatureExtractor(MobileViTImageProcessor):
    def __init__(self, *args, **kwargs) -> None:
        warnings.warn(
            "The class MobileViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers."
            " Please use MobileViTImageProcessor instead.",
            FutureWarning,
        )
        super().__init__(*args, **kwargs)
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union

if TYPE_CHECKING:
    from ...processing_utils import ProcessorMixin
    from ...utils import TensorType

from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging

logger = logging.get_logger(__name__)

OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    "google/owlvit-base-patch32": "https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json",
    "google/owlvit-base-patch16": "https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json",
    "google/owlvit-large-patch14": "https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json",
}
class OwlViTTextConfig(PretrainedConfig):
    model_type = "owlvit_text_model"

    def __init__(
        self,
        vocab_size=49408,
        hidden_size=512,
        intermediate_size=2048,
        num_hidden_layers=12,
        num_attention_heads=8,
        max_position_embeddings=16,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        pad_token_id=0,
        bos_token_id=49406,
        eos_token_id=49407,
        **kwargs,
    ):
        super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.max_position_embeddings = max_position_embeddings
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the text config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["text_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTVisionConfig(PretrainedConfig):
    model_type = "owlvit_vision_model"

    def __init__(
        self,
        hidden_size=768,
        intermediate_size=3072,
        num_hidden_layers=12,
        num_attention_heads=12,
        num_channels=3,
        image_size=768,
        patch_size=32,
        hidden_act="quick_gelu",
        layer_norm_eps=1e-5,
        attention_dropout=0.0,
        initializer_range=0.02,
        initializer_factor=1.0,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.hidden_size = hidden_size
        self.intermediate_size = intermediate_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.num_channels = num_channels
        self.image_size = image_size
        self.patch_size = patch_size
        self.hidden_act = hidden_act
        self.layer_norm_eps = layer_norm_eps
        self.attention_dropout = attention_dropout
        self.initializer_range = initializer_range
        self.initializer_factor = initializer_factor

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        # get the vision config dict if we are loading from OwlViTConfig
        if config_dict.get("model_type") == "owlvit":
            config_dict = config_dict["vision_config"]

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)
class OwlViTConfig(PretrainedConfig):
    model_type = "owlvit"
    is_composition = True

    def __init__(
        self,
        text_config=None,
        vision_config=None,
        projection_dim=512,
        logit_scale_init_value=2.6592,
        return_dict=True,
        **kwargs,
    ):
        super().__init__(**kwargs)

        if text_config is None:
            text_config = {}
            logger.info("text_config is None. Initializing the OwlViTTextConfig with default values.")

        if vision_config is None:
            vision_config = {}
            logger.info("vision_config is None. initializing the OwlViTVisionConfig with default values.")

        self.text_config = OwlViTTextConfig(**text_config)
        self.vision_config = OwlViTVisionConfig(**vision_config)

        self.projection_dim = projection_dim
        self.logit_scale_init_value = logit_scale_init_value
        self.return_dict = return_dict
        self.initializer_factor = 1.0

    @classmethod
    def from_pretrained(cls, pretrained_model_name_or_path: Union[str, os.PathLike], **kwargs) -> "PretrainedConfig":
        cls._set_token_in_kwargs(kwargs)

        config_dict, kwargs = cls.get_config_dict(pretrained_model_name_or_path, **kwargs)

        if "model_type" in config_dict and hasattr(cls, "model_type") and config_dict["model_type"] != cls.model_type:
            logger.warning(
                f"You are using a model of type {config_dict['model_type']} to instantiate a model of type "
                f"{cls.model_type}. This is not supported for all configurations of models and can yield errors."
            )

        return cls.from_dict(config_dict, **kwargs)

    @classmethod
    def from_text_vision_configs(cls, text_config: Dict, vision_config: Dict, **kwargs):
        config_dict = {}
        config_dict["text_config"] = text_config
        config_dict["vision_config"] = vision_config
        return cls.from_dict(config_dict, **kwargs)

    def to_dict(self):
        output = copy.deepcopy(self.__dict__)
        output["text_config"] = self.text_config.to_dict()
        output["vision_config"] = self.vision_config.to_dict()
        output["model_type"] = self.__class__.model_type
        return output
class OwlViTOnnxConfig(OnnxConfig):
    @property
    def inputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("input_ids", {0: "batch", 1: "sequence"}),
                ("pixel_values", {0: "batch", 1: "num_channels", 2: "height", 3: "width"}),
                ("attention_mask", {0: "batch", 1: "sequence"}),
            ]
        )

    @property
    def outputs(self) -> Mapping[str, Mapping[int, str]]:
        return OrderedDict(
            [
                ("logits_per_image", {0: "batch"}),
                ("logits_per_text", {0: "batch"}),
                ("text_embeds", {0: "batch"}),
                ("image_embeds", {0: "batch"}),
            ]
        )

    @property
    def atol_for_validation(self) -> float:
        return 1e-4

    def generate_dummy_inputs(
        self,
        processor: "ProcessorMixin",
        batch_size: int = -1,
        seq_length: int = -1,
        framework: Optional["TensorType"] = None,
    ) -> Mapping[str, Any]:
        text_input_dict = super().generate_dummy_inputs(
            processor.tokenizer, batch_size=batch_size, seq_length=seq_length, framework=framework
        )
        image_input_dict = super().generate_dummy_inputs(
            processor.image_processor, batch_size=batch_size, framework=framework
        )
        return {**text_input_dict, **image_input_dict}

    @property
    def default_onnx_opset(self) -> int:
        return 14
import fire

from utils import calculate_rouge, save_json


def calculate_rouge_path(pred_path, tgt_path, save_path=None, **rouge_kwargs):
    """Kwargs will be passed through to calculate_rouge."""
    pred_lns = [x.strip() for x in open(pred_path).readlines()]
    tgt_lns = [x.strip() for x in open(tgt_path).readlines()][: len(pred_lns)]
    metrics = calculate_rouge(pred_lns, tgt_lns, **rouge_kwargs)
    if save_path is not None:
        save_json(metrics, save_path, indent=None)
    return metrics  # these print nicely


if __name__ == "__main__":
    fire.Fire(calculate_rouge_path)
import copy
from typing import Any, Dict, List, Optional, Union

import numpy as np
import torch

from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging

logger = logging.get_logger(__name__)


class ClapFeatureExtractor(SequenceFeatureExtractor):
    model_input_names = ["input_features", "is_longer"]

    def __init__(
        self,
        feature_size=64,
        sampling_rate=48_000,
        hop_length=480,
        max_length_s=10,
        fft_window_size=1024,
        padding_value=0.0,
        return_attention_mask=False,
        frequency_min: float = 0,
        frequency_max: float = 14_000,
        top_db: int = None,
        truncation: str = "fusion",
        padding: str = "repeatpad",
        **kwargs,
    ):
        super().__init__(
            feature_size=feature_size,
            sampling_rate=sampling_rate,
            padding_value=padding_value,
            return_attention_mask=return_attention_mask,
            **kwargs,
        )
        self.top_db = top_db
        self.truncation = truncation
        self.padding = padding
        self.fft_window_size = fft_window_size
        self.nb_frequency_bins = (fft_window_size >> 1) + 1
        self.hop_length = hop_length
        self.max_length_s = max_length_s
        self.nb_max_samples = max_length_s * sampling_rate
        self.sampling_rate = sampling_rate
        self.frequency_min = frequency_min
        self.frequency_max = frequency_max
        self.mel_filters = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm=None,
            mel_scale="htk",
        )
        self.mel_filters_slaney = mel_filter_bank(
            num_frequency_bins=self.nb_frequency_bins,
            num_mel_filters=feature_size,
            min_frequency=frequency_min,
            max_frequency=frequency_max,
            sampling_rate=sampling_rate,
            norm="slaney",
            mel_scale="slaney",
        )
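
        # Two mel filter banks are kept on purpose: the HTK-scaled bank feeds
        # the "fusion" feature path, while the Slaney-scaled bank is used for
        # the standard (rand_trunc / padded) features further below.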
    def to_dict(self) -> Dict[str, Any]:
        output = copy.deepcopy(self.__dict__)
        output["feature_extractor_type"] = self.__class__.__name__
        if "mel_filters" in output:
            del output["mel_filters"]
        if "mel_filters_slaney" in output:
            del output["mel_filters_slaney"]
        return output
    def _np_extract_fbank_features ( self : Dict ,waveform : np.array ,mel_filters : Optional[np.array] = None ):
        '''simple docstring'''
        log_mel_spectrogram = spectrogram(
            waveform ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=mel_filters ,log_mel="""dB""" ,)
        return log_mel_spectrogram.T
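    # Fusion sketch for the method below: three random fixed-size chunks are taken
    # from the front / middle / back thirds of the full mel, the whole mel is
    # bilinearly shrunk to the same chunk size, and the four views are stacked as
    # channels so the model sees both local detail and global context.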
    def _random_mel_fusion ( self : Optional[int] ,mel : Dict ,total_frames : Optional[Any] ,chunk_frames : Optional[int] ):
        '''simple docstring'''
        ranges = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
        if len(ranges[1] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[1] = [0]
        if len(ranges[2] ) == 0:
            # if the audio is too short, we just use the first chunk
            ranges[2] = [0]
        # randomly choose an index for each part
        idx_front = np.random.choice(ranges[0] )
        idx_middle = np.random.choice(ranges[1] )
        idx_back = np.random.choice(ranges[2] )
        mel_chunk_front = mel[idx_front : idx_front + chunk_frames, :]
        mel_chunk_middle = mel[idx_middle : idx_middle + chunk_frames, :]
        mel_chunk_back = mel[idx_back : idx_back + chunk_frames, :]
        mel_shrink = torch.tensor(mel[None, None, :] )
        mel_shrink = torch.nn.functional.interpolate(
            mel_shrink ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=False )
        mel_shrink = mel_shrink[0][0].numpy()
        mel_fusion = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
        return mel_fusion
    def _get_input_mel ( self : Dict ,waveform : np.array ,max_length : Optional[Any] ,truncation : int ,padding : Dict ):
        '''simple docstring'''
        if waveform.shape[0] > max_length:
            if truncation == "rand_trunc":
                longer = True
                # random crop to max_length (for compatibility) -> this should be handled by self.pad
                overflow = len(waveform ) - max_length
                idx = np.random.randint(0 ,overflow + 1 )
                waveform = waveform[idx : idx + max_length]
                input_mel = self._np_extract_fbank_features(waveform ,self.mel_filters_slaney )[None, :]
            elif truncation == "fusion":
                mel = self._np_extract_fbank_features(waveform ,self.mel_filters )
                chunk_frames = max_length // self.hop_length + 1  # the +1 is related to how the spectrogram is computed
                total_frames = mel.shape[0]
                if chunk_frames == total_frames:
                    # there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
                    # In this case, we just use the whole audio.
                    input_mel = np.stack([mel, mel, mel, mel] ,axis=0 )
                    longer = False
                else:
                    input_mel = self._random_mel_fusion(mel ,total_frames ,chunk_frames )
                    longer = True
            else:
                raise NotImplementedError(f'data_truncating {truncation} not implemented' )
        else:
            longer = False
            # only use repeat as a new possible value for padding; we repeat the audio before applying the usual max_length padding
            if waveform.shape[0] < max_length:
                if padding == "repeat":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform ,n_repeat + 1 ) )[:max_length]
                if padding == "repeatpad":
                    n_repeat = int(max_length / len(waveform ) )
                    waveform = np.stack(np.tile(waveform ,n_repeat ) )
                waveform = np.pad(waveform ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
            if truncation == "fusion":
                input_mel = self._np_extract_fbank_features(waveform ,self.mel_filters )
                input_mel = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
            else:
                input_mel = self._np_extract_fbank_features(waveform ,self.mel_filters_slaney )[None, :]
        return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search ( possible_board : list[int] , diagonal_right_collisions : list[int] , diagonal_left_collisions : list[int] , boards : list[list[str]] , n : int , ):
    row = len(possible_board)
    # If row is equal to the size of the board it means there is a queen in each row of
    # the current board (possible_board)
if row == n:
# We convert the variable possible_board that looks like this: [1, 3, 0, 2] to
# this: ['. Q . . ', '. . . Q ', 'Q . . . ', '. . Q . ']
boards.append([""". """ * i + """Q """ + """. """ * (n - 1 - i) for i in possible_board])
return
    # We iterate over each column in the row to find all possible results in each row
    for col in range(n):
        # We apply what we learned previously. First we check that the current board
        # (possible_board) does not already contain the same column value, because if
        # it does there is a vertical collision. Then we apply the two formulas we
        # learned before:
        #
        # 45º: y - x = b or 45º: row - col = b
        # 135º: y + x = b or 135º: row + col = b.
        #
        # And we verify that the results of these two formulas do not already exist in
        # their respective variables (diagonal_right_collisions, diagonal_left_collisions).
        #
        # If any of these checks is True it means there is a collision, so we continue
        # to the next value in the for loop.
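        # Worked example: queens at (row=1, col=2) and (row=3, col=4) both give
        # row - col == -1, so they share a 45º diagonal and would collide.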
if (
col in possible_board
or row - col in diagonal_right_collisions
or row + col in diagonal_left_collisions
):
continue
# If it is False we call dfs function again and we update the inputs
        depth_first_search(
            [*possible_board, col] , [*diagonal_right_collisions, row - col] , [*diagonal_left_collisions, row + col] , boards , n , )
def n_queens_solution ( n : int):
    boards: list[list[str]] = []
    depth_first_search([] , [] , [] , boards , n)
# Print all the boards
for board in boards:
for column in board:
            print(column)
print("""""")
print(len(lowerCamelCase) , """solutions were found.""")
if __name__ == "__main__":
import doctest
doctest.testmod()
n_queens_solution(4)
| 27 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
    def _a ( self : Tuple ,pipeline_test_casse_name : Optional[int] ,config_class : str ,model_architecture : List[str] ,tokenizer_name : List[str] ,processor_name : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_dpt import DPTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    def __init__( self : Optional[int] ,*args : Optional[int] ,**kwargs : Dict ):
        '''simple docstring'''
        warnings.warn(
            """The class DPTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use DPTImageProcessor instead.""" ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 27 |
'''simple docstring'''
import baseaa
def ascii85_encode ( plain : str):
    return baseaa.aaaencode(plain.encode("""utf-8"""))
def ascii85_decode ( data : bytes):
    return baseaa.aaadecode(data).decode("""utf-8""")
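# Round-trip sketch (using this file's aliased stdlib import, which mirrors
# base64's Ascii85 helpers):
#   ascii85_decode(ascii85_encode("hello")) == "hello"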
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 1 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bsa import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
__magic_name__ = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
__magic_name__ = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
__magic_name__ = BeautifulSoup(res.text, 'html.parser')
__magic_name__ = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char ( cp : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
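# e.g. _is_chinese_char(ord("中")) is True (U+4E2D lies in the main CJK block),
# while _is_chinese_char(ord("a")) is False.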
def is_chinese ( word : str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word ( tokens : List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
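# Illustrative example: with bert_tokens ['我', '喜', '欢'] and chinese_word_set
# {'喜欢'}, the function below rewrites the span to ['我', '喜', '##欢'] so that
# whole-word masking can treat '喜欢' as a single unit.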
def add_sub_symbol ( bert_tokens : List[str] , chinese_word_set : set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            max_match = min(end - start , max_word_len)
            for i in range(max_match , 1 , -1):
                whole_word = """""".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i):
                        bert_word[j] = """##""" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref ( lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer):
    ltp_res = []
    for i in range(0 , len(lines) , 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0 , len(lines) , 100):
        res = bert_tokenizer(lines[i : i + 100] , add_special_tokens=True , truncation=True , max_length=512)
        bert_res.extend(res["""input_ids"""])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res , ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens , chinese_word)
        ref_id = []
        # We only save the positions of Chinese subwords that start with ##, meaning they are part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save the position of this Chinese token
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main ( args : Tuple):
    # For Chinese (Ro)Bert, the best result is from: RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name , """r""" , encoding="""utf-8""") as f:
        data = f.readlines()
    lines = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiters like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster on a GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(lines , ltp_tokenizer , bert_tokenizer)
    with open(args.save_path , """w""" , encoding="""utf-8""") as f:
        data = [json.dumps(ref) + """\n""" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
from fractions import Fraction
from math import gcd, sqrt
def is_sq ( number : int):
    sq = int(number**0.5)
    return number == sq * sq
def add_three ( x_num : int , x_den : int , y_num : int , y_den : int , z_num : int , z_den : int):
    top = x_num * y_den * z_den + y_num * x_den * z_den + z_num * x_den * y_den
    bottom = x_den * y_den * z_den
    hcf = gcd(top , bottom)
    top //= hcf
    bottom //= hcf
    return top, bottom
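# Worked example for add_three: 1/2 + 1/3 + 1/6 gives top = 1*3*6 + 1*2*6 + 1*2*3 = 36
# and bottom = 2*3*6 = 36; gcd(36, 36) = 36, so the reduced result is (1, 1), i.e. exactly 1.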
def solution ( order : int = 35):
    unique_s: set = set()
    hcf: int
    total: Fraction = Fraction(0)
    fraction_sum: tuple[int, int]
for x_num in range(1 , order + 1):
for x_den in range(x_num + 1 , order + 1):
for y_num in range(1 , order + 1):
for y_den in range(y_num + 1 , order + 1):
# n=1
                    z_num = x_num * y_den + x_den * y_num
                    z_den = x_den * y_den
                    hcf = gcd(z_num , z_den)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den)
                        unique_s.add(fraction_sum)
# n=2
                    z_num = (
                        x_num * x_num * y_den * y_den + x_den * x_den * y_num * y_num
                    )
                    z_den = x_den * x_den * y_den * y_den
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num , z_den)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den)
                            unique_s.add(fraction_sum)
# n=-1
                    z_num = x_num * y_num
                    z_den = x_den * y_num + x_num * y_den
                    hcf = gcd(z_num , z_den)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
                        fraction_sum = add_three(
                            x_num , x_den , y_num , y_den , z_num , z_den)
                        unique_s.add(fraction_sum)
                    # n=-2
                    z_num = x_num * x_num * y_num * y_num
                    z_den = (
                        x_den * x_den * y_num * y_num + x_num * x_num * y_den * y_den
                    )
                    if is_sq(z_num) and is_sq(z_den):
                        z_num = int(sqrt(z_num))
                        z_den = int(sqrt(z_den))
                        hcf = gcd(z_num , z_den)
z_num //= hcf
z_den //= hcf
if 0 < z_num < z_den <= order:
                            fraction_sum = add_three(
                                x_num , x_den , y_num , y_den , z_num , z_den)
                            unique_s.add(fraction_sum)
    for num, den in unique_s:
        total += Fraction(num , den)
return total.denominator + total.numerator
if __name__ == "__main__":
print(f"""{solution() = }""")
| 27 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
            warnings.warn(
                """The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
                """ instead.""" ,FutureWarning ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
        warnings.warn(
            """`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,FutureWarning ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
        warnings.warn(
            """`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,FutureWarning ,)
return self.image_processor
| 27 | 1 |
'''simple docstring'''
from dataclasses import dataclass
from typing import Optional, Tuple, Union
import torch
import torch.nn as nn
from ..configuration_utils import ConfigMixin, register_to_config
from ..utils import BaseOutput
from .embeddings import GaussianFourierProjection, TimestepEmbedding, Timesteps
from .modeling_utils import ModelMixin
from .unet_ad_blocks import get_down_block, get_mid_block, get_out_block, get_up_block
@dataclass
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = 42
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@register_to_config
def __init__( self : List[Any] ,_a : int = 65536 ,_a : Optional[int] = None ,_a : int = 2 ,_a : int = 2 ,_a : int = 0 ,_a : str = "fourier" ,_a : bool = True ,_a : bool = False ,_a : float = 0.0 ,_a : Tuple[str] = ("DownBlock1DNoSkip", "DownBlock1D", "AttnDownBlock1D") ,_a : Tuple[str] = ("AttnUpBlock1D", "UpBlock1D", "UpBlock1DNoSkip") ,_a : Tuple[str] = "UNetMidBlock1D" ,_a : str = None ,_a : Tuple[int] = (32, 32, 64) ,_a : str = None ,_a : int = 8 ,_a : int = 1 ,_a : bool = False ,):
'''simple docstring'''
super().__init__()
A_ : int = sample_size
# time
if time_embedding_type == "fourier":
A_ : str = GaussianFourierProjection(
embedding_size=8 ,set_W_to_weight=_a ,log=_a ,flip_sin_to_cos=_a )
A_ : Tuple = 2 * block_out_channels[0]
elif time_embedding_type == "positional":
A_ : List[Any] = Timesteps(
block_out_channels[0] ,flip_sin_to_cos=_a ,downscale_freq_shift=_a )
A_ : Optional[int] = block_out_channels[0]
if use_timestep_embedding:
A_ : str = block_out_channels[0] * 4
A_ : List[str] = TimestepEmbedding(
in_channels=_a ,time_embed_dim=_a ,act_fn=_a ,out_dim=block_out_channels[0] ,)
A_ : List[Any] = nn.ModuleList([] )
A_ : int = None
A_ : List[str] = nn.ModuleList([] )
A_ : Optional[Any] = None
# down
A_ : Dict = in_channels
for i, down_block_type in enumerate(_a ):
A_ : Optional[int] = output_channel
A_ : Dict = block_out_channels[i]
if i == 0:
input_channel += extra_in_channels
A_ : str = i == len(_a ) - 1
A_ : List[str] = get_down_block(
_a ,num_layers=_a ,in_channels=_a ,out_channels=_a ,temb_channels=block_out_channels[0] ,add_downsample=not is_final_block or downsample_each_block ,)
self.down_blocks.append(_a )
# mid
A_ : List[Any] = get_mid_block(
_a ,in_channels=block_out_channels[-1] ,mid_channels=block_out_channels[-1] ,out_channels=block_out_channels[-1] ,embed_dim=block_out_channels[0] ,num_layers=_a ,add_downsample=_a ,)
# up
A_ : Tuple = list(reversed(_a ) )
A_ : List[Any] = reversed_block_out_channels[0]
if out_block_type is None:
A_ : Union[str, Any] = out_channels
else:
A_ : Tuple = block_out_channels[0]
for i, up_block_type in enumerate(_a ):
A_ : str = output_channel
A_ : Any = (
reversed_block_out_channels[i + 1] if i < len(_a ) - 1 else final_upsample_channels
)
A_ : Any = i == len(_a ) - 1
A_ : Union[str, Any] = get_up_block(
_a ,num_layers=_a ,in_channels=_a ,out_channels=_a ,temb_channels=block_out_channels[0] ,add_upsample=not is_final_block ,)
self.up_blocks.append(_a )
A_ : Any = output_channel
# out
A_ : str = norm_num_groups if norm_num_groups is not None else min(block_out_channels[0] // 4 ,32 )
A_ : Optional[Any] = get_out_block(
out_block_type=_a ,num_groups_out=_a ,embed_dim=block_out_channels[0] ,out_channels=_a ,act_fn=_a ,fc_dim=block_out_channels[-1] // 4 ,)
def _a ( self : str ,_a : torch.FloatTensor ,_a : Union[torch.Tensor, float, int] ,_a : bool = True ,):
'''simple docstring'''
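        # Time-conditioning sketch: the raw timestep is promoted to a tensor,
        # projected (Fourier or positional, per the config), optionally passed
        # through an MLP, and broadcast over the sequence so every position
        # receives the same conditioning signal.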
A_ : Tuple = timestep
if not torch.is_tensor(_a ):
A_ : Any = torch.tensor([timesteps] ,dtype=torch.long ,device=sample.device )
elif torch.is_tensor(_a ) and len(timesteps.shape ) == 0:
A_ : Tuple = timesteps[None].to(sample.device )
A_ : Dict = self.time_proj(_a )
if self.config.use_timestep_embedding:
A_ : int = self.time_mlp(_a )
else:
A_ : int = timestep_embed[..., None]
A_ : Tuple = timestep_embed.repeat([1, 1, sample.shape[2]] ).to(sample.dtype )
A_ : Dict = timestep_embed.broadcast_to((sample.shape[:1] + timestep_embed.shape[1:]) )
# 2. down
A_ : List[str] = ()
for downsample_block in self.down_blocks:
A_ , A_ : Tuple = downsample_block(hidden_states=_a ,temb=_a )
down_block_res_samples += res_samples
# 3. mid
if self.mid_block:
A_ : int = self.mid_block(_a ,_a )
# 4. up
for i, upsample_block in enumerate(self.up_blocks ):
A_ : List[str] = down_block_res_samples[-1:]
A_ : List[Any] = down_block_res_samples[:-1]
A_ : Any = upsample_block(_a ,res_hidden_states_tuple=_a ,temb=_a )
# 5. post-process
if self.out_block:
A_ : Union[str, Any] = self.out_block(_a ,_a )
if not return_dict:
return (sample,)
return UNetaDOutput(sample=_a )
| 27 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import List, Optional, Tuple
from ...tokenization_utils import AddedToken
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import is_sentencepiece_available, logging
if is_sentencepiece_available():
from .tokenization_big_bird import BigBirdTokenizer
else:
__magic_name__ = None
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model', 'tokenizer_file': 'tokenizer.json'}
__magic_name__ = {
'vocab_file': {
'google/bigbird-roberta-base': 'https://huggingface.co/google/bigbird-roberta-base/resolve/main/spiece.model',
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/spiece.model'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/spiece.model'
),
},
'tokenizer_file': {
'google/bigbird-roberta-base': (
'https://huggingface.co/google/bigbird-roberta-base/resolve/main/tokenizer.json'
),
'google/bigbird-roberta-large': (
'https://huggingface.co/google/bigbird-roberta-large/resolve/main/tokenizer.json'
),
'google/bigbird-base-trivia-itc': (
'https://huggingface.co/google/bigbird-base-trivia-itc/resolve/main/tokenizer.json'
),
},
}
__magic_name__ = {
'google/bigbird-roberta-base': 4_096,
'google/bigbird-roberta-large': 4_096,
'google/bigbird-base-trivia-itc': 4_096,
}
__magic_name__ = '▁'
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = BigBirdTokenizer
a_ = ["""input_ids""", """attention_mask"""]
a_ = []
def __init__( self : str ,_a : Any=None ,_a : Optional[Any]=None ,_a : str="<unk>" ,_a : Union[str, Any]="<s>" ,_a : Optional[Any]="</s>" ,_a : Any="<pad>" ,_a : str="[SEP]" ,_a : Dict="[MASK]" ,_a : int="[CLS]" ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else bos_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else eos_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else unk_token
A_ : Dict = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else pad_token
A_ : Union[str, Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else cls_token
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else sep_token
# Mask token behave like a normal word, i.e. include the space before it
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
super().__init__(
_a ,tokenizer_file=_a ,bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,**_a ,)
A_ : int = vocab_file
A_ : str = False if not self.vocab_file else True
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : List[Any] = [self.sep_token_id]
A_ : Tuple = [self.cls_token_id]
if token_ids_a is None:
return cls + token_ids_a + sep
return cls + token_ids_a + sep + token_ids_a + sep
def _a ( self : Tuple ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
if token_ids_a is not None:
raise ValueError(
"""You should not supply a second sequence if the provided sequence of """
"""ids is already formatted with special tokens for the model.""" )
return [1 if x in [self.sep_token_id, self.cls_token_id] else 0 for x in token_ids_a]
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1] + ([0] * len(_a )) + [1]
def _a ( self : str ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : Union[str, Any] = [self.sep_token_id]
A_ : str = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep ) * [0] + len(token_ids_a + sep ) * [1]
def _a ( self : Union[str, Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not self.can_save_slow_tokenizer:
raise ValueError(
"""Your fast tokenizer does not have the necessary information to save the vocabulary for a slow """
"""tokenizer.""" )
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : Union[str, Any] = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ):
copyfile(self.vocab_file ,_a )
return (out_vocab_file,)
| 27 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line ( tokenizer : Any , line : Tuple , max_length : int , padding_side : Optional[Any] , pad_to_max_length : str=True , return_tensors : Optional[Any]="pt"):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer) and not line.startswith(""" """) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch ( input_ids : Optional[int] , pad_token_id : Union[str, Any] , attention_mask : List[Any]=None , ):
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
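# e.g. with pad_token_id=0, input_ids [[5, 6, 0], [7, 0, 0]] trims to
# [[5, 6], [7, 0]]: only columns that are pad in every row are dropped.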
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def save_git_info ( folder_path : str):
    repo_infos = get_git_info()
    save_json(repo_infos , os.path.join(folder_path , """git_log.json"""))
def save_json ( content : List[str] , path : List[Any] , indent : List[str]=4 , **json_dump_kwargs : List[str]):
    with open(path , """w""") as f:
        json.dump(content , f , indent=indent , **json_dump_kwargs)
def load_json ( path : Any):
    with open(path) as f:
        return json.load(f)
def get_git_info ( ):
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        """repo_id""": str(repo),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
        """hostname""": str(socket.gethostname()),
    }
    return repo_infos
def lmap ( f : Callable , x : Iterable):
    return list(map(f , x))
def pickle_save ( contents : int , path : Union[str, Any]):
    with open(path , """wb""") as f:
        return pickle.dump(contents , f)
def normalize_answer ( s : List[str]):
    def remove_articles(text : Any):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , text)
    def white_space_fix(text : List[Any]):
        return " ".join(text.split())
    def remove_punc(text : Union[str, Any]):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text : List[str]):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
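# e.g. normalize_answer("The  Cat!") -> "cat": case, punctuation, articles and
# extra whitespace are all stripped before comparison.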
def f1_score ( prediction : int , ground_truth : int):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
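# e.g. f1_score("a cat sat", "the cat sat"): after normalization both token bags
# are {cat, sat}, so precision = recall = f1 = 1.0.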
def exact_match_score ( prediction : Any , ground_truth : Any):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match ( output_lns : List[str] , reference_lns : List[str]):
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns , reference_lns):
        em += exact_match_score(hypo , pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def is_rag_model ( model_prefix : Union[str, Any]):
    return model_prefix.startswith("""rag""")
def set_extra_model_params ( extra_params : Optional[Any] , hparams : int , config : Union[str, Any]):
    equivalent_param = {p: p for p in extra_params}
    # T5 models don't have a `dropout` param; they have `dropout_rate` instead
    equivalent_param["""dropout"""] = """dropout_rate"""
    for p in extra_params:
        if getattr(hparams , p , None):
            if not hasattr(config , p) and not hasattr(config , equivalent_param[p]):
                logger.info("""config doesn't have a `{}` attribute""".format(p))
                delattr(hparams , p)
                continue
            set_p = p if hasattr(config , p) else equivalent_param[p]
            setattr(config , set_p , getattr(hparams , p))
            delattr(hparams , p)
    return hparams, config
| 27 | 1 |
'''simple docstring'''
import argparse
import requests
import torch
from PIL import Image
from transformers import CLIPProcessor, GroupViTConfig, GroupViTModel
def rename_key ( name : List[str]):
    # vision encoder
    if "img_encoder.pos_embed" in name:
        name = name.replace("""img_encoder.pos_embed""" , """vision_model.embeddings.position_embeddings""")
    if "img_encoder.patch_embed.proj" in name:
        name = name.replace("""img_encoder.patch_embed.proj""" , """vision_model.embeddings.patch_embeddings.projection""")
    if "img_encoder.patch_embed.norm" in name:
        name = name.replace("""img_encoder.patch_embed.norm""" , """vision_model.embeddings.layernorm""")
    if "img_encoder.layers" in name:
        name = name.replace("""img_encoder.layers""" , """vision_model.encoder.stages""")
    if "blocks" in name and "res" not in name:
        name = name.replace("""blocks""" , """layers""")
    if "attn" in name and "pre_assign" not in name:
        name = name.replace("""attn""" , """self_attn""")
    if "proj" in name and "self_attn" in name and "text" not in name:
        name = name.replace("""proj""" , """out_proj""")
    if "pre_assign_attn.attn.proj" in name:
        name = name.replace("""pre_assign_attn.attn.proj""" , """pre_assign_attn.attn.out_proj""")
    if "norm1" in name:
        name = name.replace("""norm1""" , """layer_norm1""")
    if "norm2" in name and "pre_assign" not in name:
        name = name.replace("""norm2""" , """layer_norm2""")
    if "img_encoder.norm" in name:
        name = name.replace("""img_encoder.norm""" , """vision_model.layernorm""")
    # text encoder
    if "text_encoder.token_embedding" in name:
        name = name.replace("""text_encoder.token_embedding""" , """text_model.embeddings.token_embedding""")
    if "text_encoder.positional_embedding" in name:
        name = name.replace("""text_encoder.positional_embedding""" , """text_model.embeddings.position_embedding.weight""")
    if "text_encoder.transformer.resblocks." in name:
        name = name.replace("""text_encoder.transformer.resblocks.""" , """text_model.encoder.layers.""")
    if "ln_1" in name:
        name = name.replace("""ln_1""" , """layer_norm1""")
    if "ln_2" in name:
        name = name.replace("""ln_2""" , """layer_norm2""")
    if "c_fc" in name:
        name = name.replace("""c_fc""" , """fc1""")
    if "c_proj" in name:
        name = name.replace("""c_proj""" , """fc2""")
    if "text_encoder" in name:
        name = name.replace("""text_encoder""" , """text_model""")
    if "ln_final" in name:
        name = name.replace("""ln_final""" , """final_layer_norm""")
    # projection layers
    if "img_projector.linear_hidden." in name:
        name = name.replace("""img_projector.linear_hidden.""" , """visual_projection.""")
    if "img_projector.linear_out." in name:
        name = name.replace("""img_projector.linear_out.""" , """visual_projection.3.""")
    if "text_projector.linear_hidden" in name:
        name = name.replace("""text_projector.linear_hidden""" , """text_projection""")
    if "text_projector.linear_out" in name:
        name = name.replace("""text_projector.linear_out""" , """text_projection.3""")
    return name
def convert_state_dict ( orig_state_dict : List[Any] , config : Union[str, Any]):
    for key in orig_state_dict.copy().keys():
        val = orig_state_dict.pop(key)
if "qkv" in key:
# weights and biases of the key, value and query projections of vision encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A_ : Optional[int] = key.split(""".""")
A_ , A_ : Optional[int] = int(key_split[2]), int(key_split[4])
A_ : Union[str, Any] = config.vision_config.hidden_size
if "weight" in key:
A_ : Tuple = val[:dim, :]
A_ : int = val[dim : dim * 2, :]
A_ : Union[str, Any] = val[-dim:, :]
else:
A_ : Tuple = val[:dim]
A_ : Optional[Any] = val[dim : dim * 2]
A_ : Tuple = val[-dim:]
elif "in_proj" in key:
# weights and biases of the key, value and query projections of text encoder's attention layers require special treatment:
# we need to split them up into separate matrices/vectors
A_ : Union[str, Any] = key.split(""".""")
A_ : str = int(key_split[3])
A_ : Optional[int] = config.text_config.hidden_size
if "weight" in key:
A_ : Optional[int] = val[:dim, :]
A_ : int = val[
dim : dim * 2, :
]
A_ : List[str] = val[-dim:, :]
else:
A_ : str = val[:dim]
A_ : Union[str, Any] = val[dim : dim * 2]
A_ : int = val[-dim:]
else:
            new_name = rename_key(key)
# squeeze if necessary
if (
"text_projection.0" in new_name
or "text_projection.3" in new_name
or "visual_projection.0" in new_name
or "visual_projection.3" in new_name
):
                orig_state_dict[new_name] = val.squeeze_()
else:
                orig_state_dict[new_name] = val
return orig_state_dict
def prepare_img ( ):
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url , stream=True).raw)
    return im
@torch.no_grad()
def convert_groupvit_checkpoint ( checkpoint_path : Union[str, Any] , pytorch_dump_folder_path : int , model_name : List[str]="groupvit-gcc-yfcc" , push_to_hub : Dict=False):
    config = GroupViTConfig()
    model = GroupViTModel(config).eval()
    state_dict = torch.load(checkpoint_path , map_location="""cpu""")["""model"""]
    new_state_dict = convert_state_dict(state_dict , config)
    missing_keys , unexpected_keys = model.load_state_dict(new_state_dict , strict=False)
    assert missing_keys == ["text_model.embeddings.position_ids"]
    assert (unexpected_keys == ["multi_label_logit_scale"]) or (len(unexpected_keys) == 0)
    # verify result
    processor = CLIPProcessor.from_pretrained("""openai/clip-vit-base-patch32""")
    image = prepare_img()
    inputs = processor(text=["""a photo of a cat""", """a photo of a dog"""] , images=image , padding=True , return_tensors="""pt""")
    with torch.no_grad():
        outputs = model(**inputs)
    if model_name == "groupvit-gcc-yfcc":
        expected_logits = torch.tensor([[13.3523, 6.3629]])
    elif model_name == "groupvit-gcc-redcaps":
        expected_logits = torch.tensor([[16.1873, 8.6230]])
    else:
        raise ValueError(F'Model name {model_name} not supported.')
    assert torch.allclose(outputs.logits_per_image , expected_logits , atol=1E-3)
    processor.save_pretrained(pytorch_dump_folder_path)
    model.save_pretrained(pytorch_dump_folder_path)
    print("""Successfully saved processor and model to""" , pytorch_dump_folder_path)
    if push_to_hub:
        print("""Pushing to the hub...""")
        processor.push_to_hub(model_name , organization="""nielsr""")
        model.push_to_hub(model_name , organization="""nielsr""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to dump the processor and PyTorch model.'
)
parser.add_argument('--checkpoint_path', default=None, type=str, help='Path to GroupViT checkpoint')
parser.add_argument(
'--model_name',
    default='groupvit-gcc-yfcc',
type=str,
help='Name of the model. Expecting either \'groupvit-gcc-yfcc\' or \'groupvit-gcc-redcaps\'',
)
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether or not to push the converted model and processor to the 🤗 hub using the provided `model_name`.',
)
__magic_name__ = parser.parse_args()
convert_groupvit_checkpoint(args.checkpoint_path, args.pytorch_dump_folder_path, args.model_name, args.push_to_hub)
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 1 |
'''simple docstring'''
def partition ( m : int):
    memo: list[list[int]] = [[0 for _ in range(m)] for _ in range(m + 1)]
    for i in range(m + 1):
        memo[i][0] = 1
    for n in range(m + 1):
        for k in range(1 , m):
            memo[n][k] += memo[n][k - 1]
            if n - k > 0:
                memo[n][k] += memo[n - k - 1][k]
    return memo[m][m - 1]
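# e.g. partition(3) == 3: the partitions of 3 are 3, 2+1 and 1+1+1.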
if __name__ == "__main__":
import sys
if len(sys.argv) == 1:
try:
__magic_name__ = int(input('Enter a number: ').strip())
print(partition(n))
except ValueError:
print('Please enter a number.')
else:
try:
__magic_name__ = int(sys.argv[1])
print(partition(n))
except ValueError:
print('Please pass a number.')
| 27 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 1 |
'''simple docstring'''
from unittest.mock import patch
import pyspark
from datasets.packaged_modules.spark.spark import (
Spark,
SparkExamplesIterable,
_generate_iterable_examples,
)
from ..utils import (
require_dill_gt_0_3_2,
require_not_windows,
)
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Optional[Any]):
A_ : Optional[Any] = []
for part_id in partition_order:
A_ : List[str] = df.where(F'SPARK_PARTITION_ID() = {part_id}').collect()
for row_idx, row in enumerate(lowerCamelCase):
expected_row_ids_and_row_dicts.append((F'{part_id}_{row_idx}', row.asDict()))
return expected_row_ids_and_row_dicts
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : int = spark.range(100).repartition(1)
A_ : Tuple = Spark(lowerCamelCase)
# The id ints will be converted to Pyarrow int64s, so each row will be 8 bytes. Setting a max_shard_size of 16 means
# that each partition can hold 2 rows.
spark_builder._repartition_df_if_needed(max_shard_size=16)
# Given that the dataframe has 100 rows and each partition has 2 rows, we expect 50 partitions.
assert spark_builder.df.rdd.getNumPartitions() == 50
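# Worked example of the sizing rule above (illustrative): each int64 row is
# 8 bytes, so max_shard_size=16 fits 16 // 8 = 2 rows per partition, and
# 100 rows / 2 rows per partition = 50 partitions.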
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : str = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : str = spark.range(10).repartition(2)
A_ : List[str] = [1, 0]
A_ : Any = _generate_iterable_examples(lowerCamelCase , lowerCamelCase) # Reverse the partitions.
A_ : Optional[int] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , lowerCamelCase)
for i, (row_id, row_dict) in enumerate(generate_fn()):
A_ , A_ : int = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : str = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : Optional[int] = spark.range(10).repartition(1)
A_ : Union[str, Any] = SparkExamplesIterable(lowerCamelCase)
assert it.n_shards == 1
for i, (row_id, row_dict) in enumerate(lowerCamelCase):
assert row_id == F'0_{i}'
assert row_dict == {"id": i}
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : Optional[Any] = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : int = spark.range(30).repartition(3)
# Mock the generator so that shuffle reverses the partition indices.
with patch("""numpy.random.Generator""") as generator_mock:
A_ : Dict = lambda lowerCamelCase: x.reverse()
A_ : List[Any] = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , [2, 1, 0])
A_ : Any = SparkExamplesIterable(lowerCamelCase).shuffle_data_sources(lowerCamelCase)
assert shuffled_it.n_shards == 3
for i, (row_id, row_dict) in enumerate(lowerCamelCase):
A_ , A_ : Dict = expected_row_ids_and_row_dicts[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : str = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : Tuple = spark.range(20).repartition(4)
# Partitions 0 and 2
A_ : Optional[Any] = SparkExamplesIterable(lowerCamelCase).shard_data_sources(worker_id=0 , num_workers=2)
assert shard_it_a.n_shards == 2
A_ : Any = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , [0, 2])
for i, (row_id, row_dict) in enumerate(lowerCamelCase):
A_ , A_ : Tuple = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
# Partitions 1 and 3
A_ : str = SparkExamplesIterable(lowerCamelCase).shard_data_sources(worker_id=1 , num_workers=2)
assert shard_it_a.n_shards == 2
A_ : str = _get_expected_row_ids_and_row_dicts_for_partition_order(lowerCamelCase , [1, 3])
for i, (row_id, row_dict) in enumerate(lowerCamelCase):
A_ , A_ : Union[str, Any] = expected_row_ids_and_row_dicts_a[i]
assert row_id == expected_row_id
assert row_dict == expected_row_dict
@require_not_windows
@require_dill_gt_0_3_2
def lowerCamelCase ( ):
A_ : int = pyspark.sql.SparkSession.builder.master("""local[*]""").appName("""pyspark""").getOrCreate()
A_ : Tuple = spark.range(100).repartition(1)
A_ : Optional[int] = Spark(lowerCamelCase)
# Choose a small max_shard_size for maximum partitioning.
spark_builder._repartition_df_if_needed(max_shard_size=1)
# The new number of partitions should not be greater than the number of rows.
assert spark_builder.df.rdd.getNumPartitions() == 100
| 27 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
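# Hedged usage sketch (assumed mapping: the two classes above correspond to
# DebertaV2Config and DebertaV2OnnxConfig in the unobfuscated transformers
# library):
#   from transformers.models.deberta_v2 import DebertaV2Config
#   config = DebertaV2Config()
#   onnx_config = DebertaV2OnnxConfig(config)
#   print(onnx_config.inputs)  # OrderedDict of dynamic batch/sequence axes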
| 27 | 1 |
'''simple docstring'''
import re
import jax.numpy as jnp
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.random import PRNGKey
from ..utils import logging
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Any):
A_ : str = r"""\w+[.]\d+"""
A_ : int = re.findall(lowerCamelCase , lowerCamelCase)
for pat in pats:
A_ : Optional[int] = key.replace(lowerCamelCase , """_""".join(pat.split(""".""")))
return key
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]):
A_ : Optional[int] = pt_tuple_key[:-1] + ("""scale""",)
if (
any("""norm""" in str_ for str_ in pt_tuple_key)
and (pt_tuple_key[-1] == "bias")
and (pt_tuple_key[:-1] + ("bias",) not in random_flax_state_dict)
and (pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict)
):
A_ : Any = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
elif pt_tuple_key[-1] in ["weight", "gamma"] and pt_tuple_key[:-1] + ("scale",) in random_flax_state_dict:
A_ : str = pt_tuple_key[:-1] + ("""scale""",)
return renamed_pt_tuple_key, pt_tensor
# embedding
if pt_tuple_key[-1] == "weight" and pt_tuple_key[:-1] + ("embedding",) in random_flax_state_dict:
A_ : List[str] = pt_tuple_key[:-1] + ("""embedding""",)
return renamed_pt_tuple_key, pt_tensor
# conv layer
A_ : Tuple = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight" and pt_tensor.ndim == 4:
A_ : Union[str, Any] = pt_tensor.transpose(2 , 3 , 1 , 0)
return renamed_pt_tuple_key, pt_tensor
# linear layer
A_ : Optional[int] = pt_tuple_key[:-1] + ("""kernel""",)
if pt_tuple_key[-1] == "weight":
A_ : List[str] = pt_tensor.T
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm weight
A_ : int = pt_tuple_key[:-1] + ("""weight""",)
if pt_tuple_key[-1] == "gamma":
return renamed_pt_tuple_key, pt_tensor
# old PyTorch layer norm bias
A_ : Union[str, Any] = pt_tuple_key[:-1] + ("""bias""",)
if pt_tuple_key[-1] == "beta":
return renamed_pt_tuple_key, pt_tensor
return pt_tuple_key, pt_tensor
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[Any]=42):
# Step 1: Convert pytorch tensor to numpy
A_ : Optional[int] = {k: v.numpy() for k, v in pt_state_dict.items()}
# Step 2: Since the model is stateless, get random Flax params
A_ : Dict = flax_model.init_weights(PRNGKey(lowerCamelCase))
A_ : Any = flatten_dict(lowerCamelCase)
A_ : List[str] = {}
# Need to change some parameters name to match Flax names
for pt_key, pt_tensor in pt_state_dict.items():
A_ : Optional[int] = rename_key(lowerCamelCase)
A_ : Optional[Any] = tuple(renamed_pt_key.split("""."""))
# Correctly rename weight parameters
A_ , A_ : Optional[int] = rename_key_and_reshape_tensor(lowerCamelCase , lowerCamelCase , lowerCamelCase)
if flax_key in random_flax_state_dict:
if flax_tensor.shape != random_flax_state_dict[flax_key].shape:
raise ValueError(
F'PyTorch checkpoint seems to be incorrect. Weight {pt_key} was expected to be of shape '
F'{random_flax_state_dict[flax_key].shape}, but is {flax_tensor.shape}.')
# also add unexpected weight so that warning is thrown
A_ : int = jnp.asarray(lowerCamelCase)
return unflatten_dict(lowerCamelCase)
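# Hedged usage sketch (the last function above corresponds to
# convert_pytorch_state_dict_to_flax in the unobfuscated diffusers source;
# names below are assumptions for illustration):
#   flax_params = convert_pytorch_state_dict_to_flax(pt_model.state_dict(), flax_model)
#   # linear kernels arrive transposed and norm weights are renamed to `scale`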
| 27 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'openai-gpt': 'https://huggingface.co/openai-gpt/resolve/main/config.json'}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """openai-gpt"""
a_ = {
"""max_position_embeddings""": """n_positions""",
"""hidden_size""": """n_embd""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : List[str] ,_a : Any=40478 ,_a : Any=512 ,_a : str=768 ,_a : List[Any]=12 ,_a : Union[str, Any]=12 ,_a : Optional[Any]="gelu" ,_a : Optional[int]=0.1 ,_a : Dict=0.1 ,_a : List[str]=0.1 ,_a : Optional[int]=1e-5 ,_a : Any=0.02 ,_a : Dict="cls_index" ,_a : str=True ,_a : int=None ,_a : Union[str, Any]=True ,_a : Tuple=0.1 ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Union[str, Any] = vocab_size
A_ : str = n_positions
A_ : Tuple = n_embd
A_ : Any = n_layer
A_ : Union[str, Any] = n_head
A_ : Optional[int] = afn
A_ : List[str] = resid_pdrop
A_ : Dict = embd_pdrop
A_ : str = attn_pdrop
A_ : Optional[int] = layer_norm_epsilon
A_ : Dict = initializer_range
A_ : Tuple = summary_type
A_ : int = summary_use_proj
A_ : Dict = summary_activation
A_ : Any = summary_first_dropout
A_ : Optional[int] = summary_proj_to_labels
super().__init__(**_a )
| 27 |
'''simple docstring'''
from ... import PretrainedConfig
__magic_name__ = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
| 27 | 1 |
'''simple docstring'''
# Copyright 2023 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
from ..models.speechta import SpeechTaForTextToSpeech, SpeechTaHifiGan, SpeechTaProcessor
from ..utils import is_datasets_available
from .base import PipelineTool
if is_datasets_available():
from datasets import load_dataset
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """microsoft/speecht5_tts"""
a_ = (
"""This is a tool that reads an English text out loud. It takes an input named `text` which should contain the """
"""text to read (in English) and returns a waveform object containing the sound."""
)
a_ = """text_reader"""
a_ = SpeechTaProcessor
a_ = SpeechTaForTextToSpeech
a_ = SpeechTaHifiGan
a_ = ["""text"""]
a_ = ["""audio"""]
def _a ( self : Tuple ):
'''simple docstring'''
if self.post_processor is None:
A_ : int = """microsoft/speecht5_hifigan"""
super().setup()
def _a ( self : Any ,_a : Optional[int] ,_a : List[str]=None ):
'''simple docstring'''
A_ : Any = self.pre_processor(text=_a ,return_tensors="""pt""" ,truncation=_a )
if speaker_embeddings is None:
if not is_datasets_available():
raise ImportError("""Datasets needs to be installed if not passing speaker embeddings.""" )
A_ : Dict = load_dataset("""Matthijs/cmu-arctic-xvectors""" ,split="""validation""" )
A_ : Union[str, Any] = torch.tensor(embeddings_dataset[7305]["""xvector"""] ).unsqueeze(0 )
return {"input_ids": inputs["input_ids"], "speaker_embeddings": speaker_embeddings}
def _a ( self : Any ,_a : List[str] ):
'''simple docstring'''
with torch.no_grad():
return self.model.generate_speech(**_a )
def _a ( self : int ,_a : List[str] ):
'''simple docstring'''
with torch.no_grad():
return self.post_processor(_a ).cpu().detach()
| 27 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
    'A': ['B', 'C', 'D'],
    'B': ['A', 'D', 'E'],
    'C': ['A', 'F'],
    'D': ['B', 'D'],
    'E': ['B', 'F'],
    'F': ['C', 'E', 'G'],
    'G': ['F'],
}
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    print(depth_first_search(G, 'A'))
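# Hypothetical check (not part of the original script): G is connected, so
# every vertex is reachable from 'A'.
if __name__ == "__main__":
    assert depth_first_search(G, 'A') == set('ABCDEFG')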
| 27 | 1 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
| 27 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
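# Sketch of the qkv split above (illustrative restatement, not new logic): a
# fused attention weight of shape (3 * dim, hidden) is sliced into query rows
# [:dim], key rows [dim : 2 * dim] and value rows [-dim:] before being written
# into the HF state dict.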
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
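# Hypothetical invocation sketch (not part of the row; the script filename and
# checkpoint path are placeholders, the flags come from the parser above):
#   python convert_mobilevit_checkpoint.py \
#       --mobilevit_name mobilevit_s \
#       --checkpoint_path ./mobilevit_s.pt \
#       --pytorch_dump_folder_path ./mobilevit-small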
| 27 | 1 |
'''simple docstring'''
from math import factorial
class Dual:
    def __init__(self, real, rank):
        self.real = real
        if isinstance(rank, int):
            self.duals = [1] * rank
        else:
            self.duals = rank
    def __repr__(self):
        return (
            f'{self.real}+'
            f'{"+".join(str(dual) + "E" + str(n + 1) for n, dual in enumerate(self.duals))}'
        )
    def reduce(self):
        cur = self.duals.copy()
        while cur[-1] == 0:
            cur.pop(-1)
        return Dual(self.real, cur)
    def __add__(self, other):
        if not isinstance(other, Dual):
            return Dual(self.real + other, self.duals)
        s_dual = self.duals.copy()
        o_dual = other.duals.copy()
        if len(s_dual) > len(o_dual):
            o_dual.extend([1] * (len(s_dual) - len(o_dual)))
        elif len(s_dual) < len(o_dual):
            s_dual.extend([1] * (len(o_dual) - len(s_dual)))
        new_duals = []
        for i in range(len(s_dual)):
            new_duals.append(s_dual[i] + o_dual[i])
        return Dual(self.real + other.real, new_duals)
    __radd__ = __add__
    def __sub__(self, other):
        return self + other * -1
    def __mul__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i * other)
            return Dual(self.real * other, new_duals)
        new_duals = [0] * (len(self.duals) + len(other.duals) + 1)
        for i, item in enumerate(self.duals):
            for j, jtem in enumerate(other.duals):
                new_duals[i + j + 1] += item * jtem
        for k in range(len(self.duals)):
            new_duals[k] += self.duals[k] * other.real
        for index in range(len(other.duals)):
            new_duals[index] += other.duals[index] * self.real
        return Dual(self.real * other.real, new_duals)
    __rmul__ = __mul__
    def __truediv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i / other)
            return Dual(self.real / other, new_duals)
        raise ValueError
    def __floordiv__(self, other):
        if not isinstance(other, Dual):
            new_duals = []
            for i in self.duals:
                new_duals.append(i // other)
            return Dual(self.real // other, new_duals)
        raise ValueError
    def __pow__(self, n):
        if n < 0 or isinstance(n, float):
            raise ValueError("""power must be a positive integer""")
        if n == 0:
            return 1
        if n == 1:
            return self
        x = self
        for _ in range(n - 1):
            x *= self
        return x
def differentiate(func, position, order):
    if not callable(func):
        raise ValueError("""differentiate() requires a function as input for func""")
    if not isinstance(position, (float, int)):
        raise ValueError("""differentiate() requires a float as input for position""")
    if not isinstance(order, int):
        raise ValueError("""differentiate() requires an int as input for order""")
    d = Dual(position, 1)
    result = func(d)
    if order == 0:
        return result.real
    return result.duals[order - 1] * factorial(order)
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    def f(y):
        return y**2 * y**4
    print(differentiate(f, 9, 2))
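# Worked check of the example above (illustrative): f(y) = y**2 * y**4 = y**6,
# whose second derivative is 30 * y**4, so differentiate(f, 9, 2) prints
# 30 * 9**4 = 196830.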
| 27 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
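# Hedged usage sketch (not part of the row; the obfuscated class above mirrors
# the CLIP-style image processor, whose defaults resize the short edge to 224
# and center-crop to 224x224):
#   processor = __lowerCAmelCase()
#   batch = processor(images=[pil_image], return_tensors='np')
#   batch['pixel_values'].shape  # -> (1, 3, 224, 224)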
| 27 | 1 |
'''simple docstring'''
def merge_sort(collection: list) -> list:
    def merge(left: list, right: list) -> list:
        def _merge():
            while left and right:
                yield (left if left[0] <= right[0] else right).pop(0)
            yield from left
            yield from right
        return list(_merge())
    if len(collection) <= 1:
        return collection
    mid = len(collection) // 2
    return merge(merge_sort(collection[:mid]), merge_sort(collection[mid:]))
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    user_input = input('Enter numbers separated by a comma:\n').strip()
    unsorted = [int(item) for item in user_input.split(',')]
    print(*merge_sort(unsorted), sep=',')
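# Hypothetical sanity checks (not part of the original script):
if __name__ == "__main__":
    assert merge_sort([5, 3, 1, 4, 2]) == [1, 2, 3, 4, 5]
    assert merge_sort([]) == []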
| 27 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
| 27 | 1 |
'''simple docstring'''
def print_pascal_triangle(num_rows: int) -> None:
    triangle = generate_pascal_triangle(num_rows)
    for row_idx in range(num_rows):
        # Print left spaces
        for _ in range(num_rows - row_idx - 1):
            print(end=' ')
        # Print row values
        for col_idx in range(row_idx + 1):
            if col_idx != row_idx:
                print(triangle[row_idx][col_idx], end=' ')
            else:
                print(triangle[row_idx][col_idx], end='')
        print()
def generate_pascal_triangle(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("""The input value of 'num_rows' should be 'int'""")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""")
    triangle: list[list[int]] = []
    for current_row_idx in range(num_rows):
        current_row = populate_current_row(triangle, current_row_idx)
        triangle.append(current_row)
    return triangle
def populate_current_row(triangle: list[list[int]], current_row_idx: int) -> list[int]:
    current_row = [-1] * (current_row_idx + 1)
    # first and last elements of current row are equal to 1
    current_row[0], current_row[-1] = 1, 1
    for current_col_idx in range(1, current_row_idx):
        calculate_current_element(
            triangle, current_row, current_row_idx, current_col_idx)
    return current_row
def calculate_current_element(triangle: list[list[int]], current_row: list[int], current_row_idx: int, current_col_idx: int) -> None:
    above_to_left_elt = triangle[current_row_idx - 1][current_col_idx - 1]
    above_to_right_elt = triangle[current_row_idx - 1][current_col_idx]
    current_row[current_col_idx] = above_to_left_elt + above_to_right_elt
def generate_pascal_triangle_optimized(num_rows: int) -> list[list[int]]:
    if not isinstance(num_rows, int):
        raise TypeError("""The input value of 'num_rows' should be 'int'""")
    if num_rows == 0:
        return []
    elif num_rows < 0:
        raise ValueError(
            """The input value of 'num_rows' should be greater than or equal to 0""")
    result: list[list[int]] = [[1]]
    for row_index in range(1, num_rows):
        temp_row = [0] + result[-1] + [0]
        row_length = row_index + 1
        # Calculate the number of distinct elements in a row
        distinct_elements = sum(divmod(row_length, 2))
        row_first_half = [
            temp_row[i - 1] + temp_row[i] for i in range(1, distinct_elements + 1)
        ]
        row_second_half = row_first_half[: (row_index + 1) // 2]
        row_second_half.reverse()
        row = row_first_half + row_second_half
        result.append(row)
    return result
def benchmark() -> None:
    from collections.abc import Callable
    from timeit import timeit
    def benchmark_a_function(func: Callable, value: int) -> None:
        call = f'{func.__name__}({value})'
        timing = timeit(f'__main__.{call}', setup="""import __main__""")
        # print(f"{call:38} = {func(value)} -- {timing:.4f} seconds")
        print(f'{call:38} -- {timing:.4f} seconds')
    for value in range(15):  # (1, 7, 14):
        for func in (generate_pascal_triangle, generate_pascal_triangle_optimized):
            benchmark_a_function(func, value)
        print()
if __name__ == "__main__":
    import doctest
    doctest.testmod()
    benchmark()
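# Illustrative check (not part of the original script):
if __name__ == "__main__":
    assert generate_pascal_triangle(5) == [
        [1], [1, 1], [1, 2, 1], [1, 3, 3, 1], [1, 4, 6, 4, 1]
    ]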
| 27 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson(
    function: str,
    starting_point: complex,
    variable: str = "x",
    precision: float = 10**-10,
    multiplicity: int = 1,
):
    x = symbols(variable)
    func = lambdify(x, function)
    diff_function = lambdify(x, diff(function, x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("""Could not find root""") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 1 |
'''simple docstring'''
import argparse
import csv
import logging
import os
import random
import numpy as np
import torch
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler, TensorDataset
from tqdm import tqdm, trange
from transformers import (
CONFIG_NAME,
WEIGHTS_NAME,
AdamW,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTTokenizer,
get_linear_schedule_with_warmup,
)
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s', datefmt='%m/%d/%Y %H:%M:%S', level=logging.INFO
)
__magic_name__ = logging.getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Any):
A_ : List[Any] = np.argmax(lowerCamelCase , axis=1)
return np.sum(outputs == labels)
def lowerCamelCase ( lowerCamelCase : List[str]):
with open(lowerCamelCase , encoding="""utf_8""") as f:
A_ : str = csv.reader(lowerCamelCase)
A_ : Tuple = []
next(lowerCamelCase) # skip the first line
for line in tqdm(lowerCamelCase):
output.append((""" """.join(line[1:5]), line[5], line[6], int(line[-1]) - 1))
return output
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Dict , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : Optional[Any] , lowerCamelCase : Dict):
A_ : int = []
for dataset in encoded_datasets:
A_ : int = len(lowerCamelCase)
A_ : Dict = np.zeros((n_batch, 2, input_len) , dtype=np.intaa)
A_ : List[str] = np.zeros((n_batch, 2) , dtype=np.intaa)
A_ : List[str] = np.full((n_batch, 2, input_len) , fill_value=-100 , dtype=np.intaa)
A_ : Union[str, Any] = np.zeros((n_batch,) , dtype=np.intaa)
for (
i,
(story, conta, conta, mc_label),
) in enumerate(lowerCamelCase):
A_ : List[Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
A_ : Union[str, Any] = [start_token] + story[:cap_length] + [delimiter_token] + conta[:cap_length] + [clf_token]
A_ : Dict = with_conta
A_ : Dict = with_conta
A_ : str = len(lowerCamelCase) - 1
A_ : Any = len(lowerCamelCase) - 1
A_ : Optional[Any] = with_conta
A_ : Dict = with_conta
A_ : Optional[Any] = mc_label
A_ : List[str] = (input_ids, mc_token_ids, lm_labels, mc_labels)
tensor_datasets.append(tuple(torch.tensor(lowerCamelCase) for t in all_inputs))
return tensor_datasets
def lowerCamelCase ( ):
A_ : List[Any] = argparse.ArgumentParser()
parser.add_argument("""--model_name""" , type=lowerCamelCase , default="""openai-gpt""" , help="""pretrained model name""")
parser.add_argument("""--do_train""" , action="""store_true""" , help="""Whether to run training.""")
parser.add_argument("""--do_eval""" , action="""store_true""" , help="""Whether to run eval on the dev set.""")
parser.add_argument(
"""--output_dir""" , default=lowerCamelCase , type=lowerCamelCase , required=lowerCamelCase , help="""The output directory where the model predictions and checkpoints will be written.""" , )
parser.add_argument("""--train_dataset""" , type=lowerCamelCase , default="""""")
parser.add_argument("""--eval_dataset""" , type=lowerCamelCase , default="""""")
parser.add_argument("""--seed""" , type=lowerCamelCase , default=42)
parser.add_argument("""--num_train_epochs""" , type=lowerCamelCase , default=3)
parser.add_argument("""--train_batch_size""" , type=lowerCamelCase , default=8)
parser.add_argument("""--eval_batch_size""" , type=lowerCamelCase , default=16)
parser.add_argument("""--adam_epsilon""" , default=1E-8 , type=lowerCamelCase , help="""Epsilon for Adam optimizer.""")
parser.add_argument("""--max_grad_norm""" , type=lowerCamelCase , default=1)
parser.add_argument(
"""--max_steps""" , default=-1 , type=lowerCamelCase , help=(
"""If > 0: set total number of training steps to perform. Override num_train_epochs."""
) , )
parser.add_argument(
"""--gradient_accumulation_steps""" , type=lowerCamelCase , default=1 , help="""Number of updates steps to accumulate before performing a backward/update pass.""" , )
parser.add_argument("""--learning_rate""" , type=lowerCamelCase , default=6.25E-5)
parser.add_argument("""--warmup_steps""" , default=0 , type=lowerCamelCase , help="""Linear warmup over warmup_steps.""")
parser.add_argument("""--lr_schedule""" , type=lowerCamelCase , default="""warmup_linear""")
parser.add_argument("""--weight_decay""" , type=lowerCamelCase , default=0.01)
parser.add_argument("""--lm_coef""" , type=lowerCamelCase , default=0.9)
parser.add_argument("""--n_valid""" , type=lowerCamelCase , default=374)
parser.add_argument("""--server_ip""" , type=lowerCamelCase , default="""""" , help="""Can be used for distant debugging.""")
parser.add_argument("""--server_port""" , type=lowerCamelCase , default="""""" , help="""Can be used for distant debugging.""")
A_ : str = parser.parse_args()
print(lowerCamelCase)
if args.server_ip and args.server_port:
# Distant debugging - see https://code.visualstudio.com/docs/python/debugging#_attach-to-a-local-script
import ptvsd
print("""Waiting for debugger attach""")
ptvsd.enable_attach(address=(args.server_ip, args.server_port) , redirect_output=lowerCamelCase)
ptvsd.wait_for_attach()
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
torch.cuda.manual_seed_all(args.seed)
A_ : Optional[int] = torch.device("""cuda""" if torch.cuda.is_available() else """cpu""")
A_ : List[str] = torch.cuda.device_count()
logger.info("""device: {}, n_gpu {}""".format(lowerCamelCase , lowerCamelCase))
if not args.do_train and not args.do_eval:
raise ValueError("""At least one of `do_train` or `do_eval` must be True.""")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
# Load tokenizer and model
# This loading functions also add new tokens and embeddings called `special tokens`
# These new embeddings will be fine-tuned on the RocStories dataset
A_ : int = ["""_start_""", """_delimiter_""", """_classify_"""]
A_ : Any = OpenAIGPTTokenizer.from_pretrained(args.model_name)
tokenizer.add_tokens(lowerCamelCase)
A_ : Optional[int] = tokenizer.convert_tokens_to_ids(lowerCamelCase)
A_ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.model_name)
model.resize_token_embeddings(len(lowerCamelCase))
model.to(lowerCamelCase)
# Load and encode the datasets
def tokenize_and_encode(lowerCamelCase : List[Any]):
if isinstance(lowerCamelCase , lowerCamelCase):
return tokenizer.convert_tokens_to_ids(tokenizer.tokenize(lowerCamelCase))
elif isinstance(lowerCamelCase , lowerCamelCase):
return obj
return [tokenize_and_encode(lowerCamelCase) for o in obj]
logger.info("""Encoding dataset...""")
A_ : Dict = load_rocstories_dataset(args.train_dataset)
A_ : Optional[Any] = load_rocstories_dataset(args.eval_dataset)
A_ : Union[str, Any] = (train_dataset, eval_dataset)
A_ : Optional[int] = tokenize_and_encode(lowerCamelCase)
# Compute the max input length for the Transformer
A_ : int = model.config.n_positions // 2 - 2
    A_ : Union[str, Any] = max(
        len(story[:max_length]) + max(len(cont1[:max_length]) , len(cont2[:max_length])) + 3
        for dataset in encoded_datasets
        for story, cont1, cont2, _ in dataset)
A_ : Tuple = min(lowerCamelCase , model.config.n_positions) # Max size of input for the pre-trained model
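    # NOTE: the "+ 3" in the length computation above reserves room for the three
    # special tokens (_start_, _delimiter_, _classify_) that frame each
    # (story, continuation) pair.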
# Prepare inputs tensors and dataloaders
A_ : Dict = pre_process_datasets(lowerCamelCase , lowerCamelCase , lowerCamelCase , *lowerCamelCase)
A_ , A_ : Optional[Any] = tensor_datasets[0], tensor_datasets[1]
A_ : List[Any] = TensorDataset(*lowerCamelCase)
A_ : Tuple = RandomSampler(lowerCamelCase)
A_ : Any = DataLoader(lowerCamelCase , sampler=lowerCamelCase , batch_size=args.train_batch_size)
A_ : List[Any] = TensorDataset(*lowerCamelCase)
A_ : Tuple = SequentialSampler(lowerCamelCase)
A_ : int = DataLoader(lowerCamelCase , sampler=lowerCamelCase , batch_size=args.eval_batch_size)
# Prepare optimizer
if args.do_train:
if args.max_steps > 0:
A_ : str = args.max_steps
A_ : Union[str, Any] = args.max_steps // (len(lowerCamelCase) // args.gradient_accumulation_steps) + 1
else:
A_ : List[Any] = len(lowerCamelCase) // args.gradient_accumulation_steps * args.num_train_epochs
A_ : Union[str, Any] = list(model.named_parameters())
A_ : Optional[Any] = ["""bias""", """LayerNorm.bias""", """LayerNorm.weight"""]
A_ : List[Any] = [
{
"""params""": [p for n, p in param_optimizer if not any(nd in n for nd in no_decay)],
"""weight_decay""": args.weight_decay,
},
{"""params""": [p for n, p in param_optimizer if any(nd in n for nd in no_decay)], """weight_decay""": 0.0},
]
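    # Biases and LayerNorm parameters are conventionally exempt from weight decay,
    # hence the two parameter groups above (decayed and non-decayed).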
A_ : Union[str, Any] = AdamW(lowerCamelCase , lr=args.learning_rate , eps=args.adam_epsilon)
A_ : Any = get_linear_schedule_with_warmup(
lowerCamelCase , num_warmup_steps=args.warmup_steps , num_training_steps=lowerCamelCase)
if args.do_train:
A_ , A_ , A_ : int = 0, 0, None
model.train()
for _ in trange(int(args.num_train_epochs) , desc="""Epoch"""):
A_ : List[str] = 0
A_ : str = 0
A_ : Union[str, Any] = tqdm(lowerCamelCase , desc="""Training""")
for step, batch in enumerate(lowerCamelCase):
A_ : Any = tuple(t.to(lowerCamelCase) for t in batch)
A_ , A_ , A_ , A_ : List[str] = batch
A_ : Any = model(lowerCamelCase , mc_token_ids=lowerCamelCase , lm_labels=lowerCamelCase , mc_labels=lowerCamelCase)
A_ : Dict = args.lm_coef * losses[0] + losses[1]
loss.backward()
optimizer.step()
scheduler.step()
optimizer.zero_grad()
tr_loss += loss.item()
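                # The value below is an exponential moving average of the per-step loss
                # (decay 0.7), used only for the progress-bar description.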
A_ : Optional[Any] = (
loss.item() if exp_average_loss is None else 0.7 * exp_average_loss + 0.3 * loss.item()
)
nb_tr_steps += 1
A_ : Any = """Training loss: {:.2e} lr: {:.2e}""".format(lowerCamelCase , scheduler.get_lr()[0])
# Save a trained model
if args.do_train:
# Save a trained model, configuration and tokenizer
A_ : Optional[int] = model.module if hasattr(lowerCamelCase , """module""") else model # Only save the model itself
# If we save using the predefined names, we can load using `from_pretrained`
A_ : Dict = os.path.join(args.output_dir , lowerCamelCase)
A_ : int = os.path.join(args.output_dir , lowerCamelCase)
torch.save(model_to_save.state_dict() , lowerCamelCase)
model_to_save.config.to_json_file(lowerCamelCase)
tokenizer.save_vocabulary(args.output_dir)
# Load a trained model and vocabulary that you have fine-tuned
A_ : List[str] = OpenAIGPTDoubleHeadsModel.from_pretrained(args.output_dir)
A_ : Union[str, Any] = OpenAIGPTTokenizer.from_pretrained(args.output_dir)
model.to(lowerCamelCase)
if args.do_eval:
model.eval()
A_ , A_ : Any = 0, 0
A_ , A_ : Any = 0, 0
for batch in tqdm(lowerCamelCase , desc="""Evaluating"""):
A_ : List[Any] = tuple(t.to(lowerCamelCase) for t in batch)
A_ , A_ , A_ , A_ : int = batch
with torch.no_grad():
A_ , A_ , A_ , A_ : Any = model(
lowerCamelCase , mc_token_ids=lowerCamelCase , lm_labels=lowerCamelCase , mc_labels=lowerCamelCase)
A_ : Tuple = mc_logits.detach().cpu().numpy()
A_ : Union[str, Any] = mc_labels.to("""cpu""").numpy()
A_ : Dict = accuracy(lowerCamelCase , lowerCamelCase)
eval_loss += mc_loss.mean().item()
eval_accuracy += tmp_eval_accuracy
nb_eval_examples += input_ids.size(0)
nb_eval_steps += 1
A_ : Optional[Any] = eval_loss / nb_eval_steps
A_ : Union[str, Any] = eval_accuracy / nb_eval_examples
A_ : Tuple = tr_loss / nb_tr_steps if args.do_train else None
A_ : str = {"""eval_loss""": eval_loss, """eval_accuracy""": eval_accuracy, """train_loss""": train_loss}
A_ : Tuple = os.path.join(args.output_dir , """eval_results.txt""")
with open(lowerCamelCase , """w""") as writer:
logger.info("""***** Eval results *****""")
for key in sorted(result.keys()):
logger.info(""" %s = %s""" , lowerCamelCase , str(result[key]))
writer.write("""%s = %s\n""" % (key, str(result[key])))
if __name__ == "__main__":
main()
| 27 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
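# Maps the requested number of image embeddings to a (rows, cols) pooling grid,
# e.g. 4 -> (2, 2), so adaptive average pooling yields that many spatial tokens.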
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : Dict ):
'''simple docstring'''
super().__init__()
        A_ : List[str] = torchvision.models.resnet152(pretrained=_a )
A_ : int = list(model.children() )[:-2]
A_ : int = nn.Sequential(*_a )
        A_ : Optional[int] = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : str ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.pool(self.model(_a ) )
A_ : Any = torch.flatten(_a ,start_dim=2 )
A_ : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = [json.loads(_a ) for l in open(_a )]
A_ : Optional[int] = os.path.dirname(_a )
A_ : Optional[Any] = tokenizer
A_ : Optional[Any] = labels
A_ : List[Any] = len(_a )
A_ : str = max_seq_length
A_ : str = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
A_ : Optional[int] = sentence[: self.max_seq_length]
A_ : Any = torch.zeros(self.n_classes )
A_ : Tuple = 1
A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
A_ : Union[str, Any] = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
A_ : str = input_row["""sentence"""]
A_ : Tuple = 1
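    # NOTE: in the original collate logic the two assignments above fill
    # text_tensor[i_batch, :length] and mask_tensor[i_batch, :length], i.e. each
    # sentence is left-aligned and its valid positions are marked with 1s.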
A_ : int = torch.stack([row["""image"""] for row in batch])
A_ : str = torch.stack([row["""label"""] for row in batch])
A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
| 27 | 1 |
'''simple docstring'''
import unittest
import numpy as np
import torch
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import (
AutoencoderKL,
DDIMScheduler,
DPMSolverMultistepScheduler,
TextToVideoSDPipeline,
    UNet3DConditionModel,
)
from diffusers.utils import is_xformers_available, load_numpy, skip_mps, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism
from ..pipeline_params import TEXT_TO_IMAGE_BATCH_PARAMS, TEXT_TO_IMAGE_PARAMS
from ..test_pipelines_common import PipelineTesterMixin
enable_full_determinism()
@skip_mps
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = TextToVideoSDPipeline
a_ = TEXT_TO_IMAGE_PARAMS
a_ = TEXT_TO_IMAGE_BATCH_PARAMS
# No `output_type`.
a_ = frozenset(
[
"""num_inference_steps""",
"""generator""",
"""latents""",
"""return_dict""",
"""callback""",
"""callback_steps""",
] )
def _a ( self : Any ):
'''simple docstring'''
torch.manual_seed(0 )
        A_ : List[Any] = UNet3DConditionModel(
block_out_channels=(32, 64, 64, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=4 ,out_channels=4 ,down_block_types=("""CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """CrossAttnDownBlock3D""", """DownBlock3D""") ,up_block_types=("""UpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""", """CrossAttnUpBlock3D""") ,cross_attention_dim=32 ,attention_head_dim=4 ,)
A_ : Tuple = DDIMScheduler(
beta_start=0.00085 ,beta_end=0.012 ,beta_schedule="""scaled_linear""" ,clip_sample=_a ,set_alpha_to_one=_a ,)
torch.manual_seed(0 )
A_ : str = AutoencoderKL(
block_out_channels=[32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,sample_size=128 ,)
torch.manual_seed(0 )
A_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
A_ : List[Any] = CLIPTextModel(_a )
A_ : str = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : str = {
"""unet""": unet,
"""scheduler""": scheduler,
"""vae""": vae,
"""text_encoder""": text_encoder,
"""tokenizer""": tokenizer,
}
return components
def _a ( self : str ,_a : List[Any] ,_a : Union[str, Any]=0 ):
'''simple docstring'''
if str(_a ).startswith("""mps""" ):
A_ : Dict = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : Dict = {
"""prompt""": """A painting of a squirrel eating a burger""",
"""generator""": generator,
"""num_inference_steps""": 2,
"""guidance_scale""": 6.0,
"""output_type""": """pt""",
}
return inputs
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : str = self.get_dummy_components()
A_ : Optional[Any] = TextToVideoSDPipeline(**_a )
A_ : str = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = self.get_dummy_inputs(_a )
A_ : int = """np"""
A_ : List[str] = sd_pipe(**_a ).frames
A_ : int = frames[0][-3:, -3:, -1]
assert frames[0].shape == (64, 64, 3)
A_ : List[Any] = np.array([158.0, 160.0, 153.0, 125.0, 100.0, 121.0, 111.0, 93.0, 113.0] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Any ):
'''simple docstring'''
self._test_attention_slicing_forward_pass(test_mean_pixel_difference=_a ,expected_max_diff=3e-3 )
@unittest.skipIf(
torch_device != """cuda""" or not is_xformers_available() ,reason="""XFormers attention is only available with CUDA and `xformers` installed""" ,)
def _a ( self : Union[str, Any] ):
'''simple docstring'''
self._test_xformers_attention_forwardGenerator_pass(test_mean_pixel_difference=_a ,expected_max_diff=1e-2 )
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _a ( self : int ):
'''simple docstring'''
pass
@unittest.skip(reason="""Batching needs to be properly figured out first for this pipeline.""" )
def _a ( self : Tuple ):
'''simple docstring'''
pass
@unittest.skip(reason="""`num_images_per_prompt` argument is not supported for this pipeline.""" )
def _a ( self : Optional[Any] ):
'''simple docstring'''
pass
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return super().test_progress_bar()
@slow
@skip_mps
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : List[str] ):
'''simple docstring'''
A_ : str = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video.npy""" )
A_ : Optional[Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A_ : Tuple = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config )
A_ : List[Any] = pipe.to("""cuda""" )
A_ : Optional[Any] = """Spiderman is surfing"""
A_ : Optional[int] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ : List[str] = pipe(_a ,generator=_a ,num_inference_steps=25 ,output_type="""pt""" ).frames
A_ : str = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
def _a ( self : int ):
'''simple docstring'''
A_ : int = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/text_to_video/video_2step.npy""" )
A_ : Optional[Any] = TextToVideoSDPipeline.from_pretrained("""damo-vilab/text-to-video-ms-1.7b""" )
A_ : Any = pipe.to("""cuda""" )
A_ : Any = """Spiderman is surfing"""
A_ : Optional[Any] = torch.Generator(device="""cpu""" ).manual_seed(0 )
A_ : List[str] = pipe(_a ,generator=_a ,num_inference_steps=2 ,output_type="""pt""" ).frames
A_ : List[Any] = video_frames.cpu().numpy()
assert np.abs(expected_video - video ).mean() < 5e-2
| 27 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCamelCase ( lowerCamelCase : int):
if num <= 0:
A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(msg)
A_ : str = [True] * (num + 1)
A_ : Tuple = []
A_ : str = 2
A_ : Any = int(math.sqrt(lowerCamelCase))
while start <= end:
# If start is a prime
if sieve[start] is True:
            prime.append(start)
# Set multiples of start be False
            for i in range(start * start , num + 1 , start):
if sieve[i] is True:
A_ : Union[str, Any] = False
start += 1
for j in range(end + 1 , num + 1):
if sieve[j] is True:
            prime.append(j)
return prime
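# Quick sanity check: prime_sieve(10) should return [2, 3, 5, 7].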
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
__magic_name__ = {
'A': ['B', 'C', 'E'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F', 'G'],
'D': ['B'],
'E': ['A', 'B', 'D'],
'F': ['C'],
'G': ['C'],
}
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : dict[str, list[str]] ,_a : str ):
'''simple docstring'''
A_ : str = graph
# mapping node to its parent in resulting breadth first tree
A_ : dict[str, str | None] = {}
A_ : List[str] = source_vertex
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = {self.source_vertex}
A_ : List[str] = None
A_ : List[str] = [self.source_vertex] # first in first out queue
while queue:
A_ : Union[str, Any] = queue.pop(0 )
for adjacent_vertex in self.graph[vertex]:
if adjacent_vertex not in visited:
visited.add(_a )
A_ : Optional[Any] = vertex
queue.append(_a )
def _a ( self : Dict ,_a : str ):
'''simple docstring'''
if target_vertex == self.source_vertex:
return self.source_vertex
A_ : Dict = self.parent.get(_a )
if target_vertex_parent is None:
A_ : str = (
f'No path from vertex: {self.source_vertex} to vertex: {target_vertex}'
)
raise ValueError(_a )
return self.shortest_path(_a ) + f'->{target_vertex}'
if __name__ == "__main__":
__magic_name__ = Graph(graph, 'G')
g.breath_first_search()
print(g.shortest_path('D'))
print(g.shortest_path('G'))
print(g.shortest_path('Foo'))
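# Expected behaviour of the search above: the first call prints G->C->A->B->D,
# the second prints G (the source maps to itself), and the third raises
# ValueError because 'Foo' was never reached (it is not in the graph).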
| 27 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
        'You are instantiating a new tokenizer from scratch. This is not supported by this script. '
        'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fp16:
__magic_name__ = 'temp_engine/bert-fp16.engine'
if args.int8:
__magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__magic_name__ = 1 << 50
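        # NOTE: presumably the builder's maximum workspace size in bytes (1 << 50);
        # TensorRT draws from this budget when choosing layer implementations.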
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase)
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
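    # The two host buffers returned here hold the start- and end-logit arrays copied
    # back from the GPU; the evaluation loop below unpacks them as (start_logits, end_logits).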
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__magic_name__ = raw_datasets['validation'].column_names
__magic_name__ = 'question' if 'question' in column_names else column_names[0]
__magic_name__ = 'context' if 'context' in column_names else column_names[1]
__magic_name__ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__magic_name__ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__magic_name__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( lowerCamelCase : Dict):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take up a lot of space). So we remove that
    # left whitespace.
A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
A_ : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , )
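    # Example: with the default max_seq_length=384 and doc_stride=128, a long context is
    # split into overlapping windows that share 128 tokens, and each window becomes one
    # feature that maps back to its source example via `overflow_to_sample_mapping`.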
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Union[str, Any] = []
for i in range(len(tokenized_examples["""input_ids"""])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase)
A_ : Tuple = 1 if pad_on_right else 0
# One example can give several spans, this is the index of the example containing this span of text.
A_ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
# Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
# position is part of the context or not.
A_ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
__magic_name__ = raw_datasets['validation']
# Validation Feature Creation
__magic_name__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__magic_name__ = default_data_collator
__magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__magic_name__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
A_ : Tuple = postprocess_qa_predictions(
examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : Dict = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase)
__magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize
# Allocate device memory for inputs and outputs.
__magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
    __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    __magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__magic_name__ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__magic_name__ = 0.0
__magic_name__ = 0
__magic_name__ = timeit.default_timer()
__magic_name__ = None
for step, batch in enumerate(eval_dataloader):
__magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__magic_name__ , __magic_name__ = outputs
__magic_name__ = torch.tensor(start_logits)
__magic_name__ = torch.tensor(end_logits)
        # necessary to pad predictions and labels so they can be gathered across processes
__magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__magic_name__ = nested_truncate(all_preds, len(eval_dataset))
__magic_name__ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds)
__magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]):
print("""\nThe shortest path matrix using Floyd Warshall algorithm\n""")
for i in range(lowerCamelCase):
for j in range(lowerCamelCase):
if dist[i][j] != float("""inf"""):
print(int(dist[i][j]) , end="""\t""")
else:
print("""INF""" , end="""\t""")
print()
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = [[float("""inf""") for _ in range(lowerCamelCase)] for _ in range(lowerCamelCase)]
for i in range(lowerCamelCase):
for j in range(lowerCamelCase):
A_ : List[str] = graph[i][j]
# check vertex k against all other vertices (i, j)
for k in range(lowerCamelCase):
# looping through rows of graph array
for i in range(lowerCamelCase):
# looping through columns of graph array
for j in range(lowerCamelCase):
if (
dist[i][k] != float("""inf""")
and dist[k][j] != float("""inf""")
and dist[i][k] + dist[k][j] < dist[i][j]
):
A_ : Tuple = dist[i][k] + dist[k][j]
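    # The triple loop above is the classic Floyd-Warshall relaxation:
    # O(v^3) time and O(v^2) space, with `inf` marking unreachable vertex pairs.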
_print_dist(lowerCamelCase , lowerCamelCase)
return dist, v
if __name__ == "__main__":
__magic_name__ = int(input('Enter number of vertices: '))
__magic_name__ = int(input('Enter number of edges: '))
__magic_name__ = [[float('inf') for i in range(v)] for j in range(v)]
for i in range(v):
__magic_name__ = 0.0
# src and dst are indices that must be within the array size graph[e][v]
# failure to follow this will result in an error
for i in range(e):
print('\nEdge ', i + 1)
__magic_name__ = int(input('Enter source:'))
__magic_name__ = int(input('Enter destination:'))
__magic_name__ = float(input('Enter weight:'))
__magic_name__ = weight
floyd_warshall(graph, v)
# Example Input
# Enter number of vertices: 3
# Enter number of edges: 2
# # generated graph from vertex and edge inputs
# [[inf, inf, inf], [inf, inf, inf], [inf, inf, inf]]
# [[0.0, inf, inf], [inf, 0.0, inf], [inf, inf, 0.0]]
# specify source, destination and weight for edge #1
# Edge 1
# Enter source:1
# Enter destination:2
# Enter weight:2
# specify source, destination and weight for edge #2
# Edge 2
# Enter source:2
# Enter destination:1
# Enter weight:1
# # Expected Output from the vertice, edge and src, dst, weight inputs!!
# 0 INF INF
# INF 0 2
# INF 1 0
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 27 | 1 |
'''simple docstring'''
import inspect
import unittest
import numpy as np
from tests.test_modeling_common import floats_tensor
from transformers import Mask2FormerConfig, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_torch_multi_gpu, require_vision, slow, torch_device
from transformers.utils import cached_property
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
    from transformers import Mask2FormerForUniversalSegmentation, Mask2FormerModel
if is_vision_available():
    from transformers import Mask2FormerImageProcessor
if is_vision_available():
from PIL import Image
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Dict ,_a : str ,_a : Optional[Any]=2 ,_a : Union[str, Any]=True ,_a : Optional[int]=False ,_a : List[str]=10 ,_a : Dict=3 ,_a : Optional[Any]=32 * 8 ,_a : Dict=32 * 8 ,_a : str=4 ,_a : Dict=64 ,):
'''simple docstring'''
A_ : List[Any] = parent
A_ : Optional[Any] = batch_size
A_ : Dict = is_training
A_ : str = use_auxiliary_loss
A_ : Optional[int] = num_queries
A_ : Union[str, Any] = num_channels
A_ : Optional[Any] = min_size
A_ : Union[str, Any] = max_size
A_ : int = num_labels
A_ : Dict = hidden_dim
A_ : Optional[int] = hidden_dim
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = floats_tensor([self.batch_size, self.num_channels, self.min_size, self.max_size] ).to(
_a )
A_ : List[Any] = torch.ones([self.batch_size, self.min_size, self.max_size] ,device=_a )
A_ : Any = (
torch.rand([self.batch_size, self.num_labels, self.min_size, self.max_size] ,device=_a ) > 0.5
).float()
A_ : List[str] = (torch.rand((self.batch_size, self.num_labels) ,device=_a ) > 0.5).long()
A_ : int = self.get_config()
return config, pixel_values, pixel_mask, mask_labels, class_labels
def _a ( self : Any ):
'''simple docstring'''
        A_ : List[str] = Mask2FormerConfig(
hidden_size=self.hidden_dim ,)
A_ : Optional[Any] = self.num_queries
A_ : Any = self.num_labels
A_ : Dict = [1, 1, 1, 1]
A_ : str = self.num_channels
A_ : List[Any] = 64
A_ : Optional[Any] = 128
A_ : List[str] = self.hidden_dim
A_ : str = self.hidden_dim
A_ : List[Any] = self.hidden_dim
return config
def _a ( self : List[Any] ):
'''simple docstring'''
A_ , A_ , A_ , A_ , A_ : Union[str, Any] = self.prepare_config_and_inputs()
A_ : int = {"""pixel_values""": pixel_values, """pixel_mask""": pixel_mask}
return config, inputs_dict
def _a ( self : Tuple ,_a : Tuple ,_a : str ):
'''simple docstring'''
A_ : int = output.encoder_hidden_states
A_ : Tuple = output.pixel_decoder_hidden_states
A_ : str = output.transformer_decoder_hidden_states
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,len(config.backbone_config.depths ) )
self.parent.assertTrue(len(_a ) ,config.decoder_layers )
def _a ( self : Tuple ,_a : List[str] ,_a : List[Any] ,_a : str ,_a : Union[str, Any]=False ):
'''simple docstring'''
with torch.no_grad():
            A_ : Optional[Any] = Mask2FormerModel(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(pixel_values=_a ,pixel_mask=_a )
A_ : str = model(_a ,output_hidden_states=_a )
self.parent.assertEqual(
output.transformer_decoder_last_hidden_state.shape ,(self.batch_size, self.num_queries, self.hidden_dim) ,)
# let's ensure the other two hidden state exists
self.parent.assertTrue(output.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(output.encoder_last_hidden_state is not None )
if output_hidden_states:
self.check_output_hidden_state(_a ,_a )
def _a ( self : Optional[int] ,_a : Any ,_a : Union[str, Any] ,_a : str ,_a : Tuple ,_a : Any ):
'''simple docstring'''
        A_ : Dict = Mask2FormerForUniversalSegmentation(config=_a )
model.to(_a )
model.eval()
def comm_check_on_output(_a : Union[str, Any] ):
# let's still check that all the required stuff is there
self.parent.assertTrue(result.transformer_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.pixel_decoder_last_hidden_state is not None )
self.parent.assertTrue(result.encoder_last_hidden_state is not None )
# okay, now we need to check the logits shape
# due to the encoder compression, masks have a //4 spatial size
self.parent.assertEqual(
result.masks_queries_logits.shape ,(self.batch_size, self.num_queries, self.min_size // 4, self.max_size // 4) ,)
# + 1 for null class
self.parent.assertEqual(
result.class_queries_logits.shape ,(self.batch_size, self.num_queries, self.num_labels + 1) )
with torch.no_grad():
A_ : Union[str, Any] = model(pixel_values=_a ,pixel_mask=_a )
A_ : Dict = model(_a )
comm_check_on_output(_a )
A_ : Union[str, Any] = model(
pixel_values=_a ,pixel_mask=_a ,mask_labels=_a ,class_labels=_a )
comm_check_on_output(_a )
self.parent.assertTrue(result.loss is not None )
self.parent.assertEqual(result.loss.shape ,torch.Size([1] ) )
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
    a_ = (Mask2FormerModel, Mask2FormerForUniversalSegmentation) if is_torch_available() else ()
a_ = {"""feature-extraction""": MaskaFormerModel} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
def _a ( self : List[str] ):
'''simple docstring'''
        A_ : Tuple = Mask2FormerModelTester(self )
A_ : Dict = ConfigTester(self ,config_class=_a ,has_text_modality=_a )
def _a ( self : Dict ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(_a ,**_a ,output_hidden_states=_a )
def _a ( self : int ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
        self.model_tester.create_and_check_mask2former_instance_segmentation_head_model(*_a )
@unittest.skip(reason="""Mask2Former does not use inputs_embeds""" )
def _a ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not have a get_input_embeddings method""" )
def _a ( self : List[str] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former is not a generative model""" )
def _a ( self : Optional[Any] ):
'''simple docstring'''
pass
@unittest.skip(reason="""Mask2Former does not use token embeddings""" )
def _a ( self : Dict ):
'''simple docstring'''
pass
@require_torch_multi_gpu
@unittest.skip(
reason="""Mask2Former has some layers using `add_module` which doesn't work well with `nn.DataParallel`""" )
def _a ( self : Any ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
pass
def _a ( self : str ):
'''simple docstring'''
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Optional[int] = model_class(_a )
A_ : str = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : List[str] = [*signature.parameters.keys()]
A_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_a )
@slow
def _a ( self : List[str] ):
'''simple docstring'''
for model_name in ["facebook/mask2former-swin-small-coco-instance"]:
            A_ : str = Mask2FormerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def _a ( self : str ):
'''simple docstring'''
A_ : List[Any] = (self.model_tester.min_size,) * 2
A_ : Any = {
"""pixel_values""": torch.randn((2, 3, *size) ,device=_a ),
"""mask_labels""": torch.randn((2, 10, *size) ,device=_a ),
"""class_labels""": torch.zeros(2 ,10 ,device=_a ).long(),
}
A_ : Optional[Any] = self.model_tester.get_config()
        A_ : List[str] = Mask2FormerForUniversalSegmentation(_a ).to(_a )
A_ : int = model(**_a )
self.assertTrue(outputs.loss is not None )
def _a ( self : Any ):
'''simple docstring'''
A_ , A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs_for_common()
        self.model_tester.create_and_check_mask2former_model(_a ,**_a ,output_hidden_states=_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : Dict = model_class(_a ).to(_a )
A_ : Optional[int] = model(**_a ,output_attentions=_a )
self.assertTrue(outputs.attentions is not None )
def _a ( self : Optional[int] ):
'''simple docstring'''
if not self.model_tester.is_training:
return
A_ : List[Any] = self.all_model_classes[1]
A_ , A_ , A_ , A_ , A_ : int = self.model_tester.prepare_config_and_inputs()
A_ : Tuple = model_class(_a )
model.to(_a )
model.train()
A_ : Dict = model(_a ,mask_labels=_a ,class_labels=_a ).loss
loss.backward()
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = self.all_model_classes[1]
A_ , A_ , A_ , A_ , A_ : List[str] = self.model_tester.prepare_config_and_inputs()
A_ : int = True
A_ : Tuple = True
A_ : Union[str, Any] = model_class(_a ).to(_a )
model.train()
A_ : Dict = model(_a ,mask_labels=_a ,class_labels=_a )
A_ : Optional[int] = outputs.encoder_hidden_states[0]
encoder_hidden_states.retain_grad()
A_ : List[Any] = outputs.pixel_decoder_hidden_states[0]
pixel_decoder_hidden_states.retain_grad()
A_ : int = outputs.transformer_decoder_hidden_states[0]
transformer_decoder_hidden_states.retain_grad()
A_ : List[str] = outputs.attentions[0]
attentions.retain_grad()
outputs.loss.backward(retain_graph=_a )
self.assertIsNotNone(encoder_hidden_states.grad )
self.assertIsNotNone(pixel_decoder_hidden_states.grad )
self.assertIsNotNone(transformer_decoder_hidden_states.grad )
self.assertIsNotNone(attentions.grad )
__magic_name__ = 1e-4
def lowerCamelCase ( ):
A_ : List[str] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_vision
@slow
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self : Any ):
'''simple docstring'''
return "facebook/mask2former-swin-small-coco-instance"
@cached_property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
        return Mask2FormerImageProcessor.from_pretrained(self.model_checkpoints ) if is_vision_available() else None
def _a ( self : Tuple ):
'''simple docstring'''
        A_ : Any = Mask2FormerModel.from_pretrained(self.model_checkpoints ).to(_a )
A_ : Dict = self.default_image_processor
A_ : Optional[int] = prepare_img()
A_ : Union[str, Any] = image_processor(_a ,return_tensors="""pt""" ).to(_a )
A_ : str = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
A_ : Union[str, Any] = model(**_a )
A_ : Tuple = torch.tensor(
[[-0.2790, -1.0717, -1.1668], [-0.5128, -0.3128, -0.4987], [-0.5832, 0.1971, -0.0197]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.encoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
A_ : Optional[Any] = torch.tensor(
[[0.8973, 1.1847, 1.1776], [1.1934, 1.5040, 1.5128], [1.1153, 1.4486, 1.4951]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.pixel_decoder_last_hidden_state[0, 0, :3, :3] ,_a ,atol=_a ) )
A_ : str = torch.tensor(
[[2.1152, 1.7000, -0.8603], [1.5808, 1.8004, -0.9353], [1.6043, 1.7495, -0.5999]] ).to(_a )
self.assertTrue(
torch.allclose(
outputs.transformer_decoder_last_hidden_state[0, :3, :3] ,_a ,atol=_a ) )
def _a ( self : Optional[int] ):
'''simple docstring'''
        A_ : Dict = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
A_ : str = self.default_image_processor
A_ : str = prepare_img()
A_ : str = image_processor(_a ,return_tensors="""pt""" ).to(_a )
A_ : Optional[Any] = inputs["""pixel_values"""].shape
# check size is divisible by 32
self.assertTrue((inputs_shape[-1] % 32) == 0 and (inputs_shape[-2] % 32) == 0 )
# check size
self.assertEqual(_a ,(1, 3, 384, 384) )
with torch.no_grad():
A_ : str = model(**_a )
# masks_queries_logits
A_ : Dict = outputs.masks_queries_logits
self.assertEqual(
masks_queries_logits.shape ,(1, model.config.num_queries, inputs_shape[-2] // 4, inputs_shape[-1] // 4) )
A_ : List[str] = [
[-8.7839, -9.0056, -8.8121],
[-7.4104, -7.0313, -6.5401],
[-6.6105, -6.3427, -6.4675],
]
A_ : Any = torch.tensor(_a ).to(_a )
self.assertTrue(torch.allclose(masks_queries_logits[0, 0, :3, :3] ,_a ,atol=_a ) )
# class_queries_logits
A_ : List[str] = outputs.class_queries_logits
self.assertEqual(class_queries_logits.shape ,(1, model.config.num_queries, model.config.num_labels + 1) )
A_ : Dict = torch.tensor(
[
[1.8324, -8.0835, -4.1922],
[0.8450, -9.0050, -3.6053],
[0.3045, -7.7293, -3.0275],
] ).to(_a )
self.assertTrue(torch.allclose(outputs.class_queries_logits[0, :3, :3] ,_a ,atol=_a ) )
def _a ( self : Optional[int] ):
'''simple docstring'''
        A_ : List[str] = Mask2FormerForUniversalSegmentation.from_pretrained(self.model_checkpoints ).to(_a ).eval()
A_ : Optional[int] = self.default_image_processor
A_ : Tuple = image_processor(
            [np.zeros((3, 800, 1333) ), np.zeros((3, 800, 1333) )] ,segmentation_maps=[np.zeros((384, 384) ).astype(np.float32 ), np.zeros((384, 384) ).astype(np.float32 )] ,return_tensors="""pt""" ,)
A_ : Optional[int] = inputs["""pixel_values"""].to(_a )
A_ : int = [el.to(_a ) for el in inputs["""mask_labels"""]]
A_ : Any = [el.to(_a ) for el in inputs["""class_labels"""]]
with torch.no_grad():
A_ : Tuple = model(**_a )
self.assertTrue(outputs.loss is not None )
| 27 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
A_ : Dict = OwlViTTextConfig(**_a )
A_ : Dict = OwlViTVisionConfig(**_a )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
return cls.from_dict(_a ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
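# Hedged usage sketch for the configuration classes above: assuming they map
# to the public OwlViTTextConfig / OwlViTVisionConfig / OwlViTConfig API, a
# composite config is assembled from the two sub-config dicts through the
# classmethod mirrored near the top of the OwlViTConfig body. Illustrative
# only, not part of the original module:
if __name__ == "__main__":
    from transformers import OwlViTConfig, OwlViTTextConfig, OwlViTVisionConfig

    text_cfg = OwlViTTextConfig()                    # default text tower
    vision_cfg = OwlViTVisionConfig(patch_size=16)   # override one field
    config = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())
    assert config.vision_config.patch_size == 16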
| 27 | 1 |
'''simple docstring'''
from random import shuffle
import tensorflow as tf
from numpy import array
def lowerCamelCase ( lowerCamelCase : Dict , lowerCamelCase : Optional[Any]):
A_ : str = int(lowerCamelCase)
assert noofclusters < len(lowerCamelCase)
# Find out the dimensionality
A_ : Union[str, Any] = len(vectors[0])
# Will help select random centroids from among the available vectors
A_ : int = list(range(len(lowerCamelCase)))
shuffle(lowerCamelCase)
# GRAPH OF COMPUTATION
# We initialize a new graph and set it as the default during each run
# of this algorithm. This ensures that as this function is called
# multiple times, the default graph doesn't keep getting crowded with
# unused ops and Variables from previous function calls.
A_ : Dict = tf.Graph()
with graph.as_default():
# SESSION OF COMPUTATION
A_ : Union[str, Any] = tf.Session()
##CONSTRUCTING THE ELEMENTS OF COMPUTATION
##First lets ensure we have a Variable vector for each centroid,
##initialized to one of the vectors from the available data points
A_ : List[Any] = [
tf.Variable(vectors[vector_indices[i]]) for i in range(lowerCamelCase)
]
##These nodes will assign the centroid Variables the appropriate
##values
A_ : str = tf.placeholder("""float64""" , [dim])
A_ : Dict = []
for centroid in centroids:
cent_assigns.append(tf.assign(lowerCamelCase , lowerCamelCase))
##Variables for cluster assignments of individual vectors(initialized
##to 0 at first)
A_ : List[Any] = [tf.Variable(0) for i in range(len(lowerCamelCase))]
##These nodes will assign an assignment Variable the appropriate
##value
A_ : List[str] = tf.placeholder("""int32""")
A_ : Tuple = []
for assignment in assignments:
cluster_assigns.append(tf.assign(lowerCamelCase , lowerCamelCase))
##Now lets construct the node that will compute the mean
# The placeholder for the input
A_ : Optional[int] = tf.placeholder("""float""" , [None, dim])
# The Node/op takes the input and computes a mean along the 0th
# dimension, i.e. the list of input vectors
A_ : str = tf.reduce_mean(lowerCamelCase , 0)
##Node for computing Euclidean distances
# Placeholders for input
A_ : List[Any] = tf.placeholder("""float""" , [dim])
A_ : str = tf.placeholder("""float""" , [dim])
A_ : str = tf.sqrt(tf.reduce_sum(tf.pow(tf.sub(lowerCamelCase , lowerCamelCase) , 2)))
##This node will figure out which cluster to assign a vector to,
##based on Euclidean distances of the vector from the centroids.
# Placeholder for input
A_ : int = tf.placeholder("""float""" , [noofclusters])
A_ : Optional[int] = tf.argmin(lowerCamelCase , 0)
##INITIALIZING STATE VARIABLES
##This will help initialization of all Variables defined with respect
##to the graph. The Variable-initializer should be defined after
##all the Variables have been constructed, so that each of them
##will be included in the initialization.
A_ : List[Any] = tf.initialize_all_variables()
# Initialize all variables
sess.run(lowerCamelCase)
##CLUSTERING ITERATIONS
# Now perform the Expectation-Maximization steps of K-Means clustering
# iterations. To keep things simple, we will only do a set number of
# iterations, instead of using a Stopping Criterion.
A_ : Tuple = 100
for _ in range(lowerCamelCase):
##EXPECTATION STEP
##Based on the centroid locations till last iteration, compute
##the _expected_ centroid assignments.
# Iterate over each vector
for vector_n in range(len(lowerCamelCase)):
A_ : Tuple = vectors[vector_n]
# Compute Euclidean distance between this vector and each
# centroid. Remember that this list cannot be named
#'centroid_distances', since that is the input to the
# cluster assignment node.
A_ : Dict = [
sess.run(lowerCamelCase , feed_dict={va: vect, va: sess.run(lowerCamelCase)})
for centroid in centroids
]
# Now use the cluster assignment node, with the distances
# as the input
A_ : int = sess.run(
lowerCamelCase , feed_dict={centroid_distances: distances})
# Now assign the value to the appropriate state variable
sess.run(
cluster_assigns[vector_n] , feed_dict={assignment_value: assignment})
##MAXIMIZATION STEP
# Based on the expected state computed from the Expectation Step,
# compute the locations of the centroids so as to maximize the
# overall objective of minimizing within-cluster Sum-of-Squares
for cluster_n in range(lowerCamelCase):
# Collect all the vectors assigned to this cluster
A_ : str = [
vectors[i]
for i in range(len(lowerCamelCase))
if sess.run(assignments[i]) == cluster_n
]
# Compute new centroid location
A_ : str = sess.run(
lowerCamelCase , feed_dict={mean_input: array(lowerCamelCase)})
# Assign value to appropriate variable
sess.run(
cent_assigns[cluster_n] , feed_dict={centroid_value: new_location})
# Return centroids and assignments
A_ : Optional[Any] = sess.run(lowerCamelCase)
A_ : Dict = sess.run(lowerCamelCase)
return centroids, assignments
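# The session graph above runs the classic Expectation-Maximization loop of
# k-means against the TensorFlow 1.x API (tf.Session / tf.placeholder). For
# readers without a TF1 runtime, here is a minimal NumPy sketch of the same
# two steps; all names below are hypothetical, for illustration only:
import numpy as np

def _kmeans_sketch(vectors_np: "np.ndarray", k: int, iterations: int = 100):
    rng = np.random.default_rng(0)
    centroids = vectors_np[rng.choice(len(vectors_np), size=k, replace=False)].astype(float)
    assignments = np.zeros(len(vectors_np), dtype=int)
    for _ in range(iterations):
        # Expectation: assign every vector to its nearest centroid
        dists = np.linalg.norm(vectors_np[:, None, :] - centroids[None, :, :], axis=-1)
        assignments = dists.argmin(axis=1)
        # Maximization: move each centroid to the mean of its members
        for c in range(k):
            members = vectors_np[assignments == c]
            if len(members):
                centroids[c] = members.mean(axis=0)
    return centroids, assignments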
| 27 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
                A_ : Tuple = max_length // self.hop_length + 1  # the +1 is related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
        # only use repeat as a new possible value for padding. You repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
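# Hedged sketch of the "repeatpad" branch in the truncation/padding logic
# above: a short waveform is tiled up to the target length and the remainder
# is zero-padded. Standalone toy values, not a call into the class:
import numpy as np

_wave = np.arange(5, dtype=np.float32)     # 5-sample toy waveform
_max_len = 12
_n_repeat = _max_len // len(_wave)         # -> 2 full repeats
_tiled = np.tile(_wave, _n_repeat)         # length 10
_padded = np.pad(_tiled, (0, _max_len - len(_tiled)), mode="constant", constant_values=0)
assert len(_padded) == _max_len            # [0..4, 0..4, 0, 0]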
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
def rec_insertion_sort ( collection : list , n : int):
    # Base case: a one-element collection (or an exhausted prefix) is already sorted
    if len(collection) <= 1 or n <= 1:
        return
    insert_next(collection , n - 1)
    rec_insertion_sort(collection , n - 1)
def insert_next ( collection : list , index : int):
    # Checks order between adjacent elements
    if index >= len(collection) or collection[index - 1] <= collection[index]:
        return
    # Swaps adjacent elements since they are not in ascending order
    collection[index - 1] , collection[index] = (
        collection[index],
        collection[index - 1],
    )
    insert_next(collection , index + 1)
if __name__ == "__main__":
    numbers = input('Enter integers separated by spaces: ')
    number_list = [int(num) for num in numbers.split()]
    rec_insertion_sort(number_list, len(number_list))
    print(number_list)
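# Worked trace of the recursion above on [3, 1, 2] (illustrative):
#   rec_insertion_sort(col, 3)
#     insert_next(col, 2): col[1]=1 <= col[2]=2, nothing to do
#     rec_insertion_sort(col, 2)
#       insert_next(col, 1): col[0]=3 > col[1]=1 -> swap -> [1, 3, 2]
#         insert_next(col, 2): col[1]=3 > col[2]=2 -> swap -> [1, 2, 3]
#           insert_next(col, 3): index out of range, stop
#       rec_insertion_sort(col, 1): n <= 1, base case
_demo = [3, 1, 2]
rec_insertion_sort(_demo, len(_demo))
assert _demo == [1, 2, 3]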
| 27 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
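# Hedged companion to the integration test above: the same deterministic
# (greedy) generation is reachable through the public pipeline API. Guarded
# because it downloads the openai-gpt checkpoint on first use:
if __name__ == "__main__":
    from transformers import pipeline

    generator = pipeline("text-generation", model="openai-gpt")
    out = generator("the president is", max_length=20, do_sample=False)
    print(out[0]["generated_text"])  # greedy continuation, as asserted above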
| 27 | 1 |
'''simple docstring'''
import operator as op
__magic_name__ = 'scaler.pt'
__magic_name__ = 'pytorch_model'
__magic_name__ = 'random_states'
__magic_name__ = 'optimizer'
__magic_name__ = 'scheduler'
__magic_name__ = 'pytorch_model.bin'
__magic_name__ = 'pytorch_model.bin.index.json'
__magic_name__ = 'model.safetensors'
__magic_name__ = 'model.safetensors.index.json'
__magic_name__ = '1.10.2'
__magic_name__ = 'py38'
__magic_name__ = '4.17.0'
__magic_name__ = ['ml.p3.16xlarge', 'ml.p3dn.24xlarge', 'ml.p4dn.24xlarge']
__magic_name__ = ['FULL_SHARD', 'SHARD_GRAD_OP', 'NO_SHARD', 'HYBRID_SHARD', 'HYBRID_SHARD_ZERO2']
__magic_name__ = ['TRANSFORMER_BASED_WRAP', 'SIZE_BASED_WRAP', 'NO_WRAP']
__magic_name__ = ['BACKWARD_PRE', 'BACKWARD_POST', 'NO_PREFETCH']
__magic_name__ = ['FULL_STATE_DICT', 'LOCAL_STATE_DICT', 'SHARDED_STATE_DICT']
__magic_name__ = '2.0.1'
__magic_name__ = ['pdsh', 'standard', 'openmpi', 'mvapich']
__magic_name__ = ['default', 'reduce-overhead', 'max-autotune']
__magic_name__ = {'>': op.gt, '>=': op.ge, '==': op.eq, '!=': op.ne, '<=': op.le, '<': op.lt}
# These are the args for `torch.distributed.launch` for pytorch < 1.9
__magic_name__ = [
'nnodes',
'nproc_per_node',
'rdzv_backend',
'rdzv_endpoint',
'rdzv_id',
'rdzv_conf',
'standalone',
'max_restarts',
'monitor_interval',
'start_method',
'role',
'module',
'm',
'no_python',
'run_path',
'log_dir',
'r',
'redirects',
't',
'tee',
'node_rank',
'master_addr',
'master_port',
]
__magic_name__ = ['DEEPSPEED', 'MULTI_GPU', 'FSDP', 'MEGATRON_LM']
__magic_name__ = ['DEEPSPEED', 'MULTI_XPU', 'FSDP']
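# The string-to-operator table above is the usual building block for version
# gating. A hedged helper sketch (function name hypothetical; assumes the
# `packaging` dependency):
import operator as _op

from packaging.version import parse as _parse_version

_STR_TO_OP = {">": _op.gt, ">=": _op.ge, "==": _op.eq, "!=": _op.ne, "<=": _op.le, "<": _op.lt}

def _compare_versions_sketch(current: str, op_str: str, required: str) -> bool:
    # e.g. _compare_versions_sketch("2.1.0", ">=", "2.0.1") -> True
    return _STR_TO_OP[op_str](_parse_version(current), _parse_version(required))

assert _compare_versions_sketch("2.1.0", ">=", "2.0.1")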
| 27 |
'''simple docstring'''
import base64
def base85_encode ( string : str):
    return base64.a85encode(string.encode("""utf-8"""))
def base85_decode ( a85encoded : bytes):
    return base64.a85decode(a85encoded).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
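# Round-trip sanity check for the helpers above (Ascii85 via the standard
# library; illustrative input):
_encoded = base85_encode("some text")
assert base85_decode(_encoded) == "some text"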
| 27 | 1 |
'''simple docstring'''
from typing import Any, Dict, List, Union
from ..utils import add_end_docstrings, is_torch_available, is_vision_available, logging, requires_backends
from .base import PIPELINE_INIT_ARGS, Pipeline
if is_vision_available():
from ..image_utils import load_image
if is_torch_available():
import torch
from ..models.auto.modeling_auto import MODEL_FOR_OBJECT_DETECTION_MAPPING, MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = Dict[str, Any]
__magic_name__ = List[Prediction]
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Optional[Any] ,*_a : List[str] ,**_a : List[str] ):
'''simple docstring'''
super().__init__(*_a ,**_a )
if self.framework == "tf":
raise ValueError(f'The {self.__class__} is only available in PyTorch.' )
requires_backends(self ,"""vision""" )
self.check_model_type(
dict(MODEL_FOR_OBJECT_DETECTION_MAPPING.items() + MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.items() ) )
def _a ( self : List[Any] ,**_a : Union[str, Any] ):
'''simple docstring'''
A_ : str = {}
if "threshold" in kwargs:
A_ : Tuple = kwargs["""threshold"""]
return {}, {}, postprocess_kwargs
def __call__( self : Optional[int] ,*_a : Tuple ,**_a : Tuple ):
'''simple docstring'''
return super().__call__(*_a ,**_a )
def _a ( self : Optional[Any] ,_a : Any ):
'''simple docstring'''
A_ : Optional[int] = load_image(_a )
A_ : List[Any] = torch.IntTensor([[image.height, image.width]] )
A_ : Union[str, Any] = self.image_processor(images=[image] ,return_tensors="""pt""" )
if self.tokenizer is not None:
A_ : List[str] = self.tokenizer(text=inputs["""words"""] ,boxes=inputs["""boxes"""] ,return_tensors="""pt""" )
A_ : Tuple = target_size
return inputs
def _a ( self : Any ,_a : List[str] ):
'''simple docstring'''
A_ : str = model_inputs.pop("""target_size""" )
A_ : str = self.model(**_a )
A_ : Optional[Any] = outputs.__class__({"""target_size""": target_size, **outputs} )
if self.tokenizer is not None:
A_ : Dict = model_inputs["""bbox"""]
return model_outputs
def _a ( self : Optional[int] ,_a : Optional[Any] ,_a : Dict=0.9 ):
'''simple docstring'''
A_ : Tuple = model_outputs["""target_size"""]
if self.tokenizer is not None:
# This is a LayoutLMForTokenClassification variant.
# The OCR got the boxes and the model classified the words.
A_ , A_ : Optional[Any] = target_size[0].tolist()
def unnormalize(_a : List[Any] ):
return self._get_bounding_box(
torch.Tensor(
[
(width * bbox[0] / 1000),
(height * bbox[1] / 1000),
(width * bbox[2] / 1000),
(height * bbox[3] / 1000),
] ) )
A_ , A_ : Dict = model_outputs["""logits"""].squeeze(0 ).softmax(dim=-1 ).max(dim=-1 )
A_ : int = [self.model.config.idalabel[prediction] for prediction in classes.tolist()]
A_ : Optional[Any] = [unnormalize(_a ) for bbox in model_outputs["""bbox"""].squeeze(0 )]
A_ : Union[str, Any] = ["""score""", """label""", """box"""]
A_ : Tuple = [dict(zip(_a ,_a ) ) for vals in zip(scores.tolist() ,_a ,_a ) if vals[0] > threshold]
else:
# This is a regular ForObjectDetectionModel
A_ : Optional[Any] = self.image_processor.post_process_object_detection(_a ,_a ,_a )
A_ : Any = raw_annotations[0]
A_ : Tuple = raw_annotation["""scores"""]
A_ : Dict = raw_annotation["""labels"""]
A_ : int = raw_annotation["""boxes"""]
A_ : Union[str, Any] = scores.tolist()
A_ : int = [self.model.config.idalabel[label.item()] for label in labels]
A_ : str = [self._get_bounding_box(_a ) for box in boxes]
# {"scores": [...], ...} --> [{"score":x, ...}, ...]
A_ : int = ["""score""", """label""", """box"""]
A_ : Optional[int] = [
dict(zip(_a ,_a ) )
for vals in zip(raw_annotation["""scores"""] ,raw_annotation["""labels"""] ,raw_annotation["""boxes"""] )
]
return annotation
def _a ( self : int ,_a : "torch.Tensor" ):
'''simple docstring'''
if self.framework != "pt":
raise ValueError("""The ObjectDetectionPipeline is only available in PyTorch.""" )
A_ , A_ , A_ , A_ : Tuple = box.int().tolist()
A_ : Optional[int] = {
"""xmin""": xmin,
"""ymin""": ymin,
"""xmax""": xmax,
"""ymax""": ymax,
}
return bbox
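# Hedged usage sketch for the pipeline above. Guarded because it downloads a
# default detection checkpoint; the URL points at a public COCO sample image:
if __name__ == "__main__":
    from transformers import pipeline

    detector = pipeline("object-detection")
    preds = detector("http://images.cocodataset.org/val2017/000000039769.jpg", threshold=0.9)
    # Each entry follows the schema assembled in the postprocess step above:
    # {"score": 0.99, "label": "cat", "box": {"xmin": ..., "ymin": ..., "xmax": ..., "ymax": ...}}
    print(preds)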
| 27 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char ( cp : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
    # as are Japanese Hiragana and Katakana. Those alphabets are used to write
    # space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def is_chinese ( word : str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word ( tokens : List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol ( bert_tokens : List[str] , chinese_word_set : set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start , end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start , max_word_len)
            for i in range(l , 1 , -1):
                whole_word = """""".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1 , start + i):
                        bert_word[j] = """##""" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
def prepare_ref ( lines : List[str] , ltp_tokenizer : LTP , bert_tokenizer : BertTokenizer):
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
# We only save pos of chinese subwords start with ##, which mean is part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def main ( args : Tuple):
    # For Chinese (Ro)BERT(a), the best result is from RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
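# Toy check of the "##" marking performed by add_sub_symbol above: a match on
# the segmenter word "喜欢" marks its second character as a continuation piece
# so whole-word masking can treat the pair as one unit:
assert add_sub_symbol(["我", "喜", "欢", "你"], {"喜欢"}) == ["我", "喜", "##欢", "你"]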
| 27 | 1 |
'''simple docstring'''
from argparse import ArgumentParser
from . import BaseTransformersCLICommand
def lowerCamelCase ( lowerCamelCase : Tuple):
return DownloadCommand(args.model , args.cache_dir , args.force , args.trust_remote_code)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@staticmethod
def _a ( _a : ArgumentParser ):
'''simple docstring'''
A_ : Any = parser.add_parser("""download""" )
download_parser.add_argument(
"""--cache-dir""" ,type=_a ,default=_a ,help="""Path to location to store the models""" )
download_parser.add_argument(
"""--force""" ,action="""store_true""" ,help="""Force the model to be download even if already in cache-dir""" )
download_parser.add_argument(
"""--trust-remote-code""" ,action="""store_true""" ,help="""Whether or not to allow for custom models defined on the Hub in their own modeling files. Use only if you've reviewed the code as it will execute on your local machine""" ,)
download_parser.add_argument("""model""" ,type=_a ,help="""Name of the model to download""" )
download_parser.set_defaults(func=_a )
def __init__( self : List[str] ,_a : str ,_a : str ,_a : bool ,_a : bool ):
'''simple docstring'''
A_ : int = model
A_ : Tuple = cache
A_ : Tuple = force
A_ : str = trust_remote_code
def _a ( self : str ):
'''simple docstring'''
from ..models.auto import AutoModel, AutoTokenizer
AutoModel.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
AutoTokenizer.from_pretrained(
self._model ,cache_dir=self._cache ,force_download=self._force ,trust_remote_code=self._trust_remote_code )
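# Hedged usage note for the command above. From the shell it is reached
# through the transformers CLI entry point, e.g.:
#
#   transformers-cli download bert-base-uncased --cache-dir ./models
#
# which resolves to roughly the same AutoModel / AutoTokenizer calls as in
# the run method:
#
#   from transformers import AutoModel, AutoTokenizer
#   AutoModel.from_pretrained("bert-base-uncased", cache_dir="./models")
#   AutoTokenizer.from_pretrained("bert-base-uncased", cache_dir="./models")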
| 27 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
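# Hedged usage sketch for the processor above. Guarded because it downloads a
# checkpoint; the blank image is a stand-in for a real photo:
if __name__ == "__main__":
    from PIL import Image
    from transformers import ViltProcessor

    processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
    image = Image.new("RGB", (384, 384))
    encoding = processor(image, "How many cats are there?", return_tensors="pt")
    # input_ids / attention_mask come from the tokenizer, pixel_values /
    # pixel_mask from the image processor, merged exactly as in __call__ above.
    print(sorted(encoding.keys()))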
| 27 | 1 |
'''simple docstring'''
import collections
from typing import List, Optional, Union
from ...tokenization_utils_base import BatchEncoding
from ...utils import TensorType, add_end_docstrings, add_start_docstrings, logging
from ..bert.tokenization_bert_fast import BertTokenizerFast
from .tokenization_dpr import DPRContextEncoderTokenizer, DPRQuestionEncoderTokenizer, DPRReaderTokenizer
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
__magic_name__ = {
'vocab_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-ctx_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-ctx_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-ctx_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__magic_name__ = {
'vocab_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-question_encoder-single-nq-base': (
'https://huggingface.co/facebook/dpr-question_encoder-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-question_encoder-multiset-base': (
'https://huggingface.co/facebook/dpr-question_encoder-multiset-base/resolve/main/tokenizer.json'
),
},
}
__magic_name__ = {
'vocab_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/vocab.txt'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'facebook/dpr-reader-single-nq-base': (
'https://huggingface.co/facebook/dpr-reader-single-nq-base/resolve/main/tokenizer.json'
),
'facebook/dpr-reader-multiset-base': (
'https://huggingface.co/facebook/dpr-reader-multiset-base/resolve/main/tokenizer.json'
),
},
}
__magic_name__ = {
'facebook/dpr-ctx_encoder-single-nq-base': 512,
'facebook/dpr-ctx_encoder-multiset-base': 512,
}
__magic_name__ = {
'facebook/dpr-question_encoder-single-nq-base': 512,
'facebook/dpr-question_encoder-multiset-base': 512,
}
__magic_name__ = {
'facebook/dpr-reader-single-nq-base': 512,
'facebook/dpr-reader-multiset-base': 512,
}
__magic_name__ = {
'facebook/dpr-ctx_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-ctx_encoder-multiset-base': {'do_lower_case': True},
}
__magic_name__ = {
'facebook/dpr-question_encoder-single-nq-base': {'do_lower_case': True},
'facebook/dpr-question_encoder-multiset-base': {'do_lower_case': True},
}
__magic_name__ = {
'facebook/dpr-reader-single-nq-base': {'do_lower_case': True},
'facebook/dpr-reader-multiset-base': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = CONTEXT_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a_ = CONTEXT_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = CONTEXT_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ = DPRContextEncoderTokenizer
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = QUESTION_ENCODER_PRETRAINED_VOCAB_FILES_MAP
a_ = QUESTION_ENCODER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = QUESTION_ENCODER_PRETRAINED_INIT_CONFIGURATION
a_ = DPRQuestionEncoderTokenizer
__magic_name__ = collections.namedtuple(
'DPRSpanPrediction', ['span_score', 'relevance_score', 'doc_id', 'start_index', 'end_index', 'text']
)
__magic_name__ = collections.namedtuple('DPRReaderOutput', ['start_logits', 'end_logits', 'relevance_logits'])
__magic_name__ = r'\n Return a dictionary with the token ids of the input strings and other information to give to `.decode_best_spans`.\n It converts the strings of a question and different passages (title and text) in a sequence of IDs (integers),\n using the tokenizer and vocabulary. The resulting `input_ids` is a matrix of size `(n_passages, sequence_length)`\n with the format:\n\n [CLS] <question token ids> [SEP] <titles ids> [SEP] <texts ids>\n\n Args:\n questions (`str` or `List[str]`):\n The questions to be encoded. You can specify one question for many passages. In this case, the question\n will be duplicated like `[questions] * n_passages`. Otherwise you have to specify as many questions as in\n `titles` or `texts`.\n titles (`str` or `List[str]`):\n The passages titles to be encoded. This can be a string or a list of strings if there are several passages.\n texts (`str` or `List[str]`):\n The passages texts to be encoded. This can be a string or a list of strings if there are several passages.\n padding (`bool`, `str` or [`~utils.PaddingStrategy`], *optional*, defaults to `False`):\n Activates and controls padding. Accepts the following values:\n\n - `True` or `\'longest\'`: Pad to the longest sequence in the batch (or no padding if only a single sequence\n if provided).\n - `\'max_length\'`: Pad to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided.\n - `False` or `\'do_not_pad\'` (default): No padding (i.e., can output a batch with sequences of different\n lengths).\n truncation (`bool`, `str` or [`~tokenization_utils_base.TruncationStrategy`], *optional*, defaults to `False`):\n Activates and controls truncation. Accepts the following values:\n\n - `True` or `\'longest_first\'`: Truncate to a maximum length specified with the argument `max_length` or to\n the maximum acceptable input length for the model if that argument is not provided. This will truncate\n token by token, removing a token from the longest sequence in the pair if a pair of sequences (or a batch\n of pairs) is provided.\n - `\'only_first\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the first\n sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `\'only_second\'`: Truncate to a maximum length specified with the argument `max_length` or to the maximum\n acceptable input length for the model if that argument is not provided. This will only truncate the\n second sequence of a pair if a pair of sequences (or a batch of pairs) is provided.\n - `False` or `\'do_not_truncate\'` (default): No truncation (i.e., can output batch with sequence lengths\n greater than the model maximum admissible input size).\n max_length (`int`, *optional*):\n Controls the maximum length to use by one of the truncation/padding parameters.\n\n If left unset or set to `None`, this will use the predefined model maximum length if a maximum length\n is required by one of the truncation/padding parameters. If the model has no specific maximum input\n length (like XLNet) truncation/padding to a maximum length will be deactivated.\n return_tensors (`str` or [`~utils.TensorType`], *optional*):\n If set, will return tensors instead of list of python integers. Acceptable values are:\n\n - `\'tf\'`: Return TensorFlow `tf.constant` objects.\n - `\'pt\'`: Return PyTorch `torch.Tensor` objects.\n - `\'np\'`: Return Numpy `np.ndarray` objects.\n return_attention_mask (`bool`, *optional*):\n Whether or not to return the attention mask. If not set, will return the attention mask according to the\n specific tokenizer\'s default, defined by the `return_outputs` attribute.\n\n [What are attention masks?](../glossary#attention-mask)\n\n Return:\n `Dict[str, List[List[int]]]`: A dictionary with the following keys:\n\n - `input_ids`: List of token ids to be fed to a model.\n - `attention_mask`: List of indices specifying which tokens should be attended to by the model.\n '
@add_start_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase :
'''simple docstring'''
def __call__( self : Any ,_a : List[str] ,_a : Optional[str] = None ,_a : Optional[str] = None ,_a : Union[bool, str] = False ,_a : Union[bool, str] = False ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[bool] = None ,**_a : Tuple ,):
'''simple docstring'''
if titles is None and texts is None:
return super().__call__(
_a ,padding=_a ,truncation=_a ,max_length=_a ,return_tensors=_a ,return_attention_mask=_a ,**_a ,)
elif titles is None or texts is None:
A_ : int = titles if texts is None else texts
return super().__call__(
_a ,_a ,padding=_a ,truncation=_a ,max_length=_a ,return_tensors=_a ,return_attention_mask=_a ,**_a ,)
A_ : List[Any] = titles if not isinstance(_a ,_a ) else [titles]
A_ : Dict = texts if not isinstance(_a ,_a ) else [texts]
A_ : Tuple = len(_a )
A_ : Optional[Any] = questions if not isinstance(_a ,_a ) else [questions] * n_passages
assert len(_a ) == len(
            _a ), f'There should be as many titles as texts but got {len(_a )} titles and {len(_a )} texts.'
A_ : str = super().__call__(_a ,_a ,padding=_a ,truncation=_a )["""input_ids"""]
A_ : Union[str, Any] = super().__call__(_a ,add_special_tokens=_a ,padding=_a ,truncation=_a )["""input_ids"""]
A_ : str = {
"""input_ids""": [
(encoded_question_and_title + encoded_text)[:max_length]
if max_length is not None and truncation
else encoded_question_and_title + encoded_text
for encoded_question_and_title, encoded_text in zip(_a ,_a )
]
}
if return_attention_mask is not False:
A_ : List[str] = []
for input_ids in encoded_inputs["input_ids"]:
attention_mask.append([int(input_id != self.pad_token_id ) for input_id in input_ids] )
A_ : int = attention_mask
return self.pad(_a ,padding=_a ,max_length=_a ,return_tensors=_a )
def _a ( self : Optional[int] ,_a : BatchEncoding ,_a : DPRReaderOutput ,_a : int = 16 ,_a : int = 64 ,_a : int = 4 ,):
'''simple docstring'''
A_ : Dict = reader_input["""input_ids"""]
A_ , A_ , A_ : Optional[int] = reader_output[:3]
A_ : Dict = len(_a )
A_ : Dict = sorted(range(_a ) ,reverse=_a ,key=relevance_logits.__getitem__ )
A_ : List[DPRReaderOutput] = []
for doc_id in sorted_docs:
A_ : Tuple = list(input_ids[doc_id] )
# assuming question & title information is at the beginning of the sequence
A_ : List[Any] = sequence_ids.index(self.sep_token_id ,2 ) + 1 # second sep id
if sequence_ids[-1] == self.pad_token_id:
A_ : List[str] = sequence_ids.index(self.pad_token_id )
else:
A_ : Union[str, Any] = len(_a )
A_ : Tuple = self._get_best_spans(
start_logits=start_logits[doc_id][passage_offset:sequence_len] ,end_logits=end_logits[doc_id][passage_offset:sequence_len] ,max_answer_length=_a ,top_spans=_a ,)
for start_index, end_index in best_spans:
start_index += passage_offset
end_index += passage_offset
nbest_spans_predictions.append(
DPRSpanPrediction(
span_score=start_logits[doc_id][start_index] + end_logits[doc_id][end_index] ,relevance_score=relevance_logits[doc_id] ,doc_id=_a ,start_index=_a ,end_index=_a ,text=self.decode(sequence_ids[start_index : end_index + 1] ) ,) )
if len(_a ) >= num_spans:
break
return nbest_spans_predictions[:num_spans]
def _a ( self : Optional[int] ,_a : List[int] ,_a : List[int] ,_a : int ,_a : int ,):
'''simple docstring'''
A_ : List[str] = []
for start_index, start_score in enumerate(_a ):
for answer_length, end_score in enumerate(end_logits[start_index : start_index + max_answer_length] ):
scores.append(((start_index, start_index + answer_length), start_score + end_score) )
A_ : Any = sorted(_a ,key=lambda _a : x[1] ,reverse=_a )
A_ : List[str] = []
for (start_index, end_index), score in scores:
assert start_index <= end_index, f'Wrong span indices: [{start_index}:{end_index}]'
A_ : List[str] = end_index - start_index + 1
assert length <= max_answer_length, f'Span is too long: {length} > {max_answer_length}'
if any(
start_index <= prev_start_index <= prev_end_index <= end_index
or prev_start_index <= start_index <= end_index <= prev_end_index
for (prev_start_index, prev_end_index) in chosen_span_intervals ):
continue
chosen_span_intervals.append((start_index, end_index) )
if len(_a ) == top_spans:
break
return chosen_span_intervals
@add_end_docstrings(__SCREAMING_SNAKE_CASE )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = READER_PRETRAINED_VOCAB_FILES_MAP
a_ = READER_PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = READER_PRETRAINED_INIT_CONFIGURATION
a_ = ["""input_ids""", """attention_mask"""]
a_ = DPRReaderTokenizer
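# Hedged usage sketch for the reader tokenizer above: one question is paired
# with each (title, text) passage and encoded as
# [CLS] question [SEP] title [SEP] text. Guarded because it downloads the
# checkpoint; the strings are illustrative:
if __name__ == "__main__":
    tok = DPRReaderTokenizer.from_pretrained("facebook/dpr-reader-single-nq-base")
    enc = tok(
        questions="What does DPR stand for?",
        titles="Dense Passage Retrieval",
        texts="DPR is a set of tools for open-domain question answering.",
        return_tensors="pt",
    )
    print(enc["input_ids"].shape)  # (n_passages, sequence_length)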
| 27 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
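# Hedged sketch of the dummy-object pattern above: the placeholder keeps
# `import diffusers` working without the optional backends and defers a clear
# ImportError to first use. Standalone analogue with hypothetical names:
class _DummyTorchSdeSchedulerSketch:
    _required_backends = ["torch", "torchsde"]

    def __init__(self, *args, **kwargs):
        raise ImportError(f"This class requires the following backends: {self._required_backends}")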
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
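# Hedged sketch of the lazy-import pattern wired up above: attribute access
# triggers the real import instead of paying its cost at package import time.
# Standalone analogue (class name hypothetical, not the actual _LazyModule):
import importlib
import types

class _LazyModuleSketch(types.ModuleType):
    def __init__(self, name: str, import_structure: dict):
        super().__init__(name)
        self._import_structure = import_structure

    def __getattr__(self, attr: str):
        for submodule, names in self._import_structure.items():
            if attr in names:
                return getattr(importlib.import_module(submodule), attr)
        raise AttributeError(f"module {self.__name__!r} has no attribute {attr!r}")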
| 27 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def encode_line ( tokenizer : Any , line : Tuple , max_length : int , padding_side : Optional[Any] , pad_to_max_length : str=True , return_tensors : Optional[Any]="pt"):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer , BartTokenizer) and not line.startswith(""" """) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line] , max_length=max_length , padding="""max_length""" if pad_to_max_length else None , truncation=True , return_tensors=return_tensors , add_special_tokens=True , **extra_kw , )
def trim_batch ( input_ids : Optional[int] , pad_token_id : Union[str, Any] , attention_mask : List[Any]=None , ):
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
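# Toy illustration of trim_batch above: columns that are padding in every row
# of the batch are dropped (pad_token_id assumed to be 0 here):
_ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
assert trim_batch(_ids, 0).tolist() == [[5, 6], [7, 0]]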
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
logger = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
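# Lower text and remove punctuation, articles and extra whitespace (SQuAD-style answer normalization).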
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
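# Token-level F1 between prediction and ground truth, computed on normalized answers as in SQuAD evaluation.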
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
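# Exact match after normalization, as in SQuAD evaluation.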
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 1 |
'''simple docstring'''
from ...utils import (
    OptionalDependencyNotAvailable,
    is_note_seq_available,
    is_torch_available,
    is_transformers_available,
)
try:
if not (is_transformers_available() and is_torch_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_torch_and_transformers_objects import * # noqa F403
else:
from .notes_encoder import SpectrogramNotesEncoder
from .continous_encoder import SpectrogramContEncoder
    from .pipeline_spectrogram_diffusion import SpectrogramDiffusionPipeline, TaFilmDecoder
try:
if not (is_transformers_available() and is_torch_available() and is_note_seq_available()):
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
from ...utils.dummy_transformers_and_torch_and_note_seq_objects import * # noqa F403
else:
from .midi_utils import MidiProcessor
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 1 |
'''simple docstring'''
import inspect
from typing import List, Optional, Tuple, Union
import numpy as np
import PIL
import torch
import torch.utils.checkpoint
from ...models import UNetaDModel, VQModel
from ...schedulers import (
DDIMScheduler,
DPMSolverMultistepScheduler,
EulerAncestralDiscreteScheduler,
EulerDiscreteScheduler,
LMSDiscreteScheduler,
PNDMScheduler,
)
from ...utils import PIL_INTERPOLATION, randn_tensor
from ..pipeline_utils import DiffusionPipeline, ImagePipelineOutput
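# Resize the PIL image down to the nearest multiple of 32, rescale to [-1, 1],
# and return an NCHW float tensor.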
def lowerCamelCase ( lowerCamelCase : Dict):
A_ , A_ : List[str] = image.size
A_ , A_ : Optional[int] = (x - x % 32 for x in (w, h)) # resize to integer multiple of 32
A_ : Optional[Any] = image.resize((w, h) , resample=PIL_INTERPOLATION["""lanczos"""])
A_ : Dict = np.array(lowerCamelCase).astype(np.floataa) / 255.0
A_ : List[str] = image[None].transpose(0 , 3 , 1 , 2)
A_ : int = torch.from_numpy(lowerCamelCase)
return 2.0 * image - 1.0
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[str] ,_a : VQModel ,_a : UNetaDModel ,_a : Union[
DDIMScheduler,
PNDMScheduler,
LMSDiscreteScheduler,
EulerDiscreteScheduler,
EulerAncestralDiscreteScheduler,
DPMSolverMultistepScheduler,
] ,):
'''simple docstring'''
super().__init__()
self.register_modules(vqvae=_a ,unet=_a ,scheduler=_a )
@torch.no_grad()
def __call__( self : Any ,_a : Union[torch.Tensor, PIL.Image.Image] = None ,_a : Optional[int] = 1 ,_a : Optional[int] = 100 ,_a : Optional[float] = 0.0 ,_a : Optional[Union[torch.Generator, List[torch.Generator]]] = None ,_a : Optional[str] = "pil" ,_a : bool = True ,):
'''simple docstring'''
if isinstance(_a ,PIL.Image.Image ):
A_ : Any = 1
elif isinstance(_a ,torch.Tensor ):
A_ : Optional[int] = image.shape[0]
else:
raise ValueError(f'`image` has to be of type `PIL.Image.Image` or `torch.Tensor` but is {type(_a )}' )
if isinstance(_a ,PIL.Image.Image ):
A_ : str = preprocess(_a )
A_ , A_ : Any = image.shape[-2:]
# in_channels should be 6: 3 for latents, 3 for low resolution image
A_ : Dict = (batch_size, self.unet.config.in_channels // 2, height, width)
A_ : Optional[int] = next(self.unet.parameters() ).dtype
A_ : int = randn_tensor(_a ,generator=_a ,device=self.device ,dtype=_a )
A_ : List[Any] = image.to(device=self.device ,dtype=_a )
# set timesteps and move to the correct device
self.scheduler.set_timesteps(_a ,device=self.device )
A_ : Optional[int] = self.scheduler.timesteps
# scale the initial noise by the standard deviation required by the scheduler
A_ : Optional[Any] = latents * self.scheduler.init_noise_sigma
# prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
# eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
# eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
# and should be between [0, 1]
A_ : Union[str, Any] = """eta""" in set(inspect.signature(self.scheduler.step ).parameters.keys() )
A_ : List[Any] = {}
if accepts_eta:
A_ : Union[str, Any] = eta
for t in self.progress_bar(_a ):
# concat latents and low resolution image in the channel dimension.
A_ : List[str] = torch.cat([latents, image] ,dim=1 )
A_ : Optional[Any] = self.scheduler.scale_model_input(_a ,_a )
# predict the noise residual
A_ : Union[str, Any] = self.unet(_a ,_a ).sample
# compute the previous noisy sample x_t -> x_t-1
A_ : Union[str, Any] = self.scheduler.step(_a ,_a ,_a ,**_a ).prev_sample
# decode the image latents with the VQVAE
A_ : Any = self.vqvae.decode(_a ).sample
A_ : Tuple = torch.clamp(_a ,-1.0 ,1.0 )
A_ : int = image / 2 + 0.5
A_ : Any = image.cpu().permute(0 ,2 ,3 ,1 ).numpy()
if output_type == "pil":
A_ : List[Any] = self.numpy_to_pil(_a )
if not return_dict:
return (image,)
return ImagePipelineOutput(images=_a )
| 27 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
    a_ = [
        """generator""",
        """height""",
        """width""",
        """latents""",
        """guidance_scale""",
        """num_inference_steps""",
        """num_images_per_prompt""",
        """output_type""",
        """return_dict""",
    ]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
            # out channels is double in channels because the model predicts both mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 1 |
'''simple docstring'''
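# Iteratively applies the Catalan recurrence C(i) = C(i-1) * (4*i - 2) / (i + 1);
# for input n this returns the (n-1)-th Catalan number: 1, 1, 2, 5, 14, ...
# (Function and variable names below are restored editorially; the original identifiers were mangled.)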
def catalan_number(number: int) -> int:
    if not isinstance(number, int):
        msg = F'Input value of [number={number}] must be an integer'
        raise TypeError(msg)
    if number < 1:
        msg = F'Input value of [number={number}] must be > 0'
        raise ValueError(msg)
    current_number = 1
    for i in range(1, number):
        current_number *= 4 * i - 2
        current_number //= i + 1
    return current_number
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
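# Editorial usage sketch, not part of the original sample: the public counterpart of the
# config class above is transformers.DebertaV2Config, shown here to illustrate the
# backwards-compatibility split of a "|"-separated pos_att_type string.
if __name__ == "__main__":
    from transformers import DebertaV2Config

    example_config = DebertaV2Config(pos_att_type="""p2c|c2p""")
    print(example_config.pos_att_type)  # ['p2c', 'c2p']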
| 27 | 1 |
'''simple docstring'''
import json
import os
from pathlib import Path
import pytest
from datasets.download.download_config import DownloadConfig
from datasets.download.download_manager import DownloadManager
from datasets.utils.file_utils import hash_url_to_filename
URL = 'http://www.mocksite.com/file1.txt'
CONTENT = '"text": ["foo", "foo"]'
HASH = '6d8ce9aa78a471c7477201efbeabd3bb01ac2e7d100a6dc024ba1608361f90a8'
class __lowerCAmelCase :
'''simple docstring'''
a_ = 200
a_ = {"""Content-Length""": """100"""}
a_ = {}
def _a ( self : List[str] ,**_a : Dict ):
'''simple docstring'''
return [bytes(_a ,"""utf-8""" )]
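# Stand-in for requests.request: every call returns the canned MockResponse above.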
def lowerCamelCase ( *lowerCamelCase : List[str] , **lowerCamelCase : Any):
return MockResponse()
@pytest.mark.parametrize("""urls_type""" , [str, list, dict])
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Dict):
import requests
monkeypatch.setattr(lowerCamelCase , """request""" , lowerCamelCase)
A_ : Dict = URL
if issubclass(lowerCamelCase , lowerCamelCase):
A_ : Union[str, Any] = url
elif issubclass(lowerCamelCase , lowerCamelCase):
A_ : Tuple = [url]
elif issubclass(lowerCamelCase , lowerCamelCase):
A_ : Union[str, Any] = {"""train""": url}
A_ : Any = """dummy"""
A_ : List[Any] = """downloads"""
A_ : Dict = tmp_path
A_ : Optional[Any] = DownloadConfig(
cache_dir=os.path.join(lowerCamelCase , lowerCamelCase) , use_etag=lowerCamelCase , )
A_ : List[str] = DownloadManager(dataset_name=lowerCamelCase , download_config=lowerCamelCase)
A_ : List[str] = dl_manager.download(lowerCamelCase)
A_ : Tuple = urls
for downloaded_paths in [downloaded_paths]:
if isinstance(lowerCamelCase , lowerCamelCase):
A_ : Dict = [downloaded_paths]
A_ : List[str] = [urls]
elif isinstance(lowerCamelCase , lowerCamelCase):
assert "train" in downloaded_paths.keys()
A_ : str = downloaded_paths.values()
A_ : List[Any] = urls.values()
assert downloaded_paths
for downloaded_path, input_url in zip(lowerCamelCase , lowerCamelCase):
assert downloaded_path == dl_manager.downloaded_paths[input_url]
A_ : Optional[int] = Path(lowerCamelCase)
A_ : List[Any] = downloaded_path.parts
assert parts[-1] == HASH
assert parts[-2] == cache_subdir
assert downloaded_path.exists()
A_ : List[str] = downloaded_path.read_text()
assert content == CONTENT
A_ : str = downloaded_path.with_suffix(""".json""")
assert metadata_downloaded_path.exists()
A_ : List[Any] = json.loads(metadata_downloaded_path.read_text())
assert metadata_content == {"url": URL, "etag": None}
@pytest.mark.parametrize("""paths_type""" , [str, list, dict])
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : List[str] , lowerCamelCase : List[str]):
A_ : Tuple = str(lowerCamelCase)
if issubclass(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = filename
elif issubclass(lowerCamelCase , lowerCamelCase):
A_ : Tuple = [filename]
elif issubclass(lowerCamelCase , lowerCamelCase):
A_ : Dict = {"""train""": filename}
A_ : List[str] = """dummy"""
A_ : List[str] = xz_file.parent
A_ : Tuple = """extracted"""
A_ : Optional[Any] = DownloadConfig(
cache_dir=lowerCamelCase , use_etag=lowerCamelCase , )
A_ : List[Any] = DownloadManager(dataset_name=lowerCamelCase , download_config=lowerCamelCase)
A_ : Any = dl_manager.extract(lowerCamelCase)
A_ : Union[str, Any] = paths
for extracted_paths in [extracted_paths]:
if isinstance(lowerCamelCase , lowerCamelCase):
A_ : int = [extracted_paths]
A_ : List[Any] = [paths]
elif isinstance(lowerCamelCase , lowerCamelCase):
assert "train" in extracted_paths.keys()
A_ : Any = extracted_paths.values()
A_ : Union[str, Any] = paths.values()
assert extracted_paths
for extracted_path, input_path in zip(lowerCamelCase , lowerCamelCase):
assert extracted_path == dl_manager.extracted_paths[input_path]
A_ : Optional[int] = Path(lowerCamelCase)
A_ : Union[str, Any] = extracted_path.parts
assert parts[-1] == hash_url_to_filename(lowerCamelCase , etag=lowerCamelCase)
assert parts[-2] == extracted_subdir
assert extracted_path.exists()
A_ : Optional[int] = extracted_path.read_text()
A_ : Dict = text_file.read_text()
assert extracted_file_content == expected_file_content
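# Shared check: an archive member should be a JSONL file with 4 records, each carrying columns col_1..col_3.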
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int]):
assert path.endswith(""".jsonl""")
for num_items, line in enumerate(lowerCamelCase , start=1):
A_ : Any = json.loads(line.decode("""utf-8"""))
assert item.keys() == {"col_1", "col_2", "col_3"}
assert num_items == 4
@pytest.mark.parametrize("""archive_jsonl""" , ["""tar_jsonl_path""", """zip_jsonl_path"""])
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : List[str]):
A_ : List[Any] = request.getfixturevalue(lowerCamelCase)
A_ : int = DownloadManager()
for num_jsonl, (path, file) in enumerate(dl_manager.iter_archive(lowerCamelCase) , start=1):
_test_jsonl(lowerCamelCase , lowerCamelCase)
assert num_jsonl == 2
@pytest.mark.parametrize("""archive_nested_jsonl""" , ["""tar_nested_jsonl_path""", """zip_nested_jsonl_path"""])
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any):
A_ : Union[str, Any] = request.getfixturevalue(lowerCamelCase)
A_ : Union[str, Any] = DownloadManager()
for num_tar, (path, file) in enumerate(dl_manager.iter_archive(lowerCamelCase) , start=1):
for num_jsonl, (subpath, subfile) in enumerate(dl_manager.iter_archive(lowerCamelCase) , start=1):
_test_jsonl(lowerCamelCase , lowerCamelCase)
assert num_tar == 1
assert num_jsonl == 2
def lowerCamelCase ( lowerCamelCase : Any):
A_ : Dict = DownloadManager()
for num_file, file in enumerate(dl_manager.iter_files(lowerCamelCase) , start=1):
assert os.path.basename(lowerCamelCase) == ("test.txt" if num_file == 1 else "train.txt")
assert num_file == 2
| 27 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
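    # '.eZt8xd' is the CSS class Google currently uses for result links; the scrape breaks if it changes.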
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 | 1 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
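# Editorial usage sketch, not part of the original sample: the public counterpart of the
# image processor above is transformers.CLIPImageProcessor (resize -> center-crop -> rescale -> normalize).
if __name__ == "__main__":
    from PIL import Image
    from transformers import CLIPImageProcessor

    processor = CLIPImageProcessor(size={"""shortest_edge""": 224})
    batch = processor(images=Image.new("""RGB""", (640, 480)), return_tensors="""np""")
    print(batch["""pixel_values"""].shape)  # (1, 3, 224, 224)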
| 27 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
| 27 | 1 |
'''simple docstring'''
import json
import os
import tempfile
from transformers.testing_utils import check_json_file_has_correct_format
class __lowerCAmelCase :
'''simple docstring'''
a_ = None
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.feature_extraction_class(**self.feat_extract_dict )
A_ : Dict = json.loads(feat_extract.to_json_string() )
for key, value in self.feat_extract_dict.items():
self.assertEqual(obj[key] ,_a )
def _a ( self : int ):
'''simple docstring'''
A_ : int = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Tuple = os.path.join(_a ,"""feat_extract.json""" )
feat_extract_first.to_json_file(_a )
A_ : List[str] = self.feature_extraction_class.from_json_file(_a )
self.assertEqual(feat_extract_second.to_dict() ,feat_extract_first.to_dict() )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[str] = self.feature_extraction_class(**self.feat_extract_dict )
with tempfile.TemporaryDirectory() as tmpdirname:
A_ : Optional[int] = feat_extract_first.save_pretrained(_a )[0]
check_json_file_has_correct_format(_a )
A_ : Any = self.feature_extraction_class.from_pretrained(_a )
self.assertEqual(feat_extract_second.to_dict() ,feat_extract_first.to_dict() )
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = self.feature_extraction_class()
self.assertIsNotNone(_a )
| 27 |
'''simple docstring'''
from __future__ import annotations
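# Iterative depth-first search over an adjacency-list graph, returning every vertex reachable from start.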
def depth_first_search(graph: dict, start: str) -> set:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 1 |
'''simple docstring'''
# Copyright 2022 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import os
import platform
import numpy as np
import psutil
import torch
from accelerate import __version__ as version
from accelerate.commands.config import default_config_file, load_config_from_file
from ..utils import is_npu_available, is_xpu_available
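# Build the parser for the `accelerate env` subcommand, or a standalone parser when invoked directly.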
def lowerCamelCase ( lowerCamelCase : Optional[int]=None):
if subparsers is not None:
A_ : Tuple = subparsers.add_parser("""env""")
else:
A_ : Dict = argparse.ArgumentParser("""Accelerate env command""")
parser.add_argument(
"""--config_file""" , default=lowerCamelCase , help="""The config file to use for the default values in the launching script.""")
if subparsers is not None:
parser.set_defaults(func=lowerCamelCase)
return parser
def lowerCamelCase ( lowerCamelCase : int):
A_ : Optional[int] = torch.__version__
A_ : Optional[int] = torch.cuda.is_available()
A_ : List[str] = is_xpu_available()
A_ : Union[str, Any] = is_npu_available()
A_ : Optional[Any] = """Not found"""
# Get the default from the config file.
if args.config_file is not None or os.path.isfile(lowerCamelCase):
A_ : Dict = load_config_from_file(args.config_file).to_dict()
A_ : Tuple = {
"""`Accelerate` version""": version,
"""Platform""": platform.platform(),
"""Python version""": platform.python_version(),
"""Numpy version""": np.__version__,
"""PyTorch version (GPU?)""": F'{pt_version} ({pt_cuda_available})',
"""PyTorch XPU available""": str(lowerCamelCase),
"""PyTorch NPU available""": str(lowerCamelCase),
"""System RAM""": F'{psutil.virtual_memory().total / 1024 ** 3:.2f} GB',
}
if pt_cuda_available:
A_ : Tuple = torch.cuda.get_device_name()
print("""\nCopy-and-paste the text below in your GitHub issue\n""")
print("""\n""".join([F'- {prop}: {val}' for prop, val in info.items()]))
print("""- `Accelerate` default config:""" if args.config_file is None else """- `Accelerate` config passed:""")
A_ : List[Any] = (
"""\n""".join([F'\t- {prop}: {val}' for prop, val in accelerate_config.items()])
if isinstance(lowerCamelCase , lowerCamelCase)
else F'\t{accelerate_config}'
)
print(lowerCamelCase)
A_ : Optional[Any] = accelerate_config
return info
def lowerCamelCase ( ):
A_ : Dict = env_command_parser()
A_ : Dict = parser.parse_args()
env_command(lowerCamelCase)
return 0
if __name__ == "__main__":
raise SystemExit(main())
| 27 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
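# Map a checkpoint name (mobilevit_s/xs/xxs, optionally deeplabv3_-prefixed) to a MobileViTConfig.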
def get_mobilevit_config(mobilevit_name):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def rename_key(name, base_model=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
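# Rewrite the original checkpoint's parameter names and split fused qkv projections into query/key/value.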
def convert_state_dict(orig_state_dict, model, base_model=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def prepare_img():
    url = """http://images.cocodataset.org/val2017/000000039769.jpg"""
    im = Image.open(requests.get(url, stream=True).raw)
    return im
@torch.no_grad()
def convert_movilevit_checkpoint(mobilevit_name, checkpoint_path, pytorch_dump_folder_path, push_to_hub=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
    args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 27 | 1 |
'''simple docstring'''
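# Recursive decimal-to-binary conversion: divmod by 2 and prepend the higher-order bits.
# (Function and variable names below are restored editorially; the original identifiers were mangled.)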
def binary_recursive(decimal: int) -> str:
    decimal = int(decimal)
    if decimal in (0, 1):  # Exit cases for the recursion
        return str(decimal)
    div, mod = divmod(decimal, 2)
    return binary_recursive(div) + str(mod)
def main(number: str) -> str:
    number = str(number).strip()
    if not number:
        raise ValueError("""No input value was provided""")
    negative = """-""" if number.startswith("""-""") else """"""
    number = number.lstrip("""-""")
    if not number.isnumeric():
        raise ValueError("""Input value is not an integer""")
    return F'{negative}0b{binary_recursive(int(number))}'
if __name__ == "__main__":
from doctest import testmod
testmod()
| 27 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
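# Hedged usage sketch (not from the source; class and keyword names are
# assumptions): if this `preprocess` lives on a Hugging Face-style image
# processor, a call might look like:
#
#   from PIL import Image
#   processor = SomeImageProcessor()                      # hypothetical class
#   batch = processor.preprocess(Image.open("cat.png"), return_tensors="pt")
#   pixel_values = batch["pixel_values"]                  # (batch, channels, H, W)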
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...file_utils import _LazyModule, is_torch_available
from ...utils import OptionalDependencyNotAvailable
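# Standard Transformers lazy-import layout: declare the public symbols for each
# submodule up front and defer the heavier torch-dependent imports to first use.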
__magic_name__ = {
'configuration_gpt_neox_japanese': ['GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP', 'GPTNeoXJapaneseConfig'],
'tokenization_gpt_neox_japanese': ['GPTNeoXJapaneseTokenizer'],
}
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST',
'GPTNeoXJapaneseForCausalLM',
'GPTNeoXJapaneseLayer',
'GPTNeoXJapaneseModel',
'GPTNeoXJapanesePreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_gpt_neox_japanese import GPT_NEOX_JAPANESE_PRETRAINED_CONFIG_ARCHIVE_MAP, GPTNeoXJapaneseConfig
from .tokenization_gpt_neox_japanese import GPTNeoXJapaneseTokenizer
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_gpt_neox_japanese import (
GPT_NEOX_JAPANESE_PRETRAINED_MODEL_ARCHIVE_LIST,
GPTNeoXJapaneseForCausalLM,
GPTNeoXJapaneseLayer,
GPTNeoXJapaneseModel,
GPTNeoXJapanesePreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_flax_available,
is_sentencepiece_available,
is_tf_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {
'configuration_albert': ['ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'AlbertConfig', 'AlbertOnnxConfig'],
}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['AlbertTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['AlbertTokenizerFast']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'AlbertForMaskedLM',
'AlbertForMultipleChoice',
'AlbertForPreTraining',
'AlbertForQuestionAnswering',
'AlbertForSequenceClassification',
'AlbertForTokenClassification',
'AlbertModel',
'AlbertPreTrainedModel',
'load_tf_weights_in_albert',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFAlbertForMaskedLM',
'TFAlbertForMultipleChoice',
'TFAlbertForPreTraining',
'TFAlbertForQuestionAnswering',
'TFAlbertForSequenceClassification',
'TFAlbertForTokenClassification',
'TFAlbertMainLayer',
'TFAlbertModel',
'TFAlbertPreTrainedModel',
]
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'FlaxAlbertForMaskedLM',
'FlaxAlbertForMultipleChoice',
'FlaxAlbertForPreTraining',
'FlaxAlbertForQuestionAnswering',
'FlaxAlbertForSequenceClassification',
'FlaxAlbertForTokenClassification',
'FlaxAlbertModel',
'FlaxAlbertPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_albert import ALBERT_PRETRAINED_CONFIG_ARCHIVE_MAP, AlbertConfig, AlbertOnnxConfig
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert import AlbertTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_albert_fast import AlbertTokenizerFast
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_albert import (
ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
AlbertForMaskedLM,
AlbertForMultipleChoice,
AlbertForPreTraining,
AlbertForQuestionAnswering,
AlbertForSequenceClassification,
AlbertForTokenClassification,
AlbertModel,
AlbertPreTrainedModel,
load_tf_weights_in_albert,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_albert import (
TF_ALBERT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFAlbertForMaskedLM,
TFAlbertForMultipleChoice,
TFAlbertForPreTraining,
TFAlbertForQuestionAnswering,
TFAlbertForSequenceClassification,
TFAlbertForTokenClassification,
TFAlbertMainLayer,
TFAlbertModel,
TFAlbertPreTrainedModel,
)
try:
if not is_flax_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_flax_albert import (
FlaxAlbertForMaskedLM,
FlaxAlbertForMultipleChoice,
FlaxAlbertForPreTraining,
FlaxAlbertForQuestionAnswering,
FlaxAlbertForSequenceClassification,
FlaxAlbertForTokenClassification,
FlaxAlbertModel,
FlaxAlbertPreTrainedModel,
)
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
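# Modified Newton-Raphson: for a root of multiplicity m the update
#   x_{n+1} = x_n - m * f(x_n) / f'(x_n)
# restores the quadratic convergence that plain Newton loses at repeated roots.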
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : complex , lowerCamelCase : str = "x" , lowerCamelCase : float = 10**-10 , lowerCamelCase : int = 1 , ):
A_ : int = symbols(lowerCamelCase)
A_ : List[Any] = lambdify(lowerCamelCase , lowerCamelCase)
A_ : List[str] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase))
A_ : str = starting_point
while True:
if diff_function(lowerCamelCase) != 0:
A_ : int = prev_guess - multiplicity * func(lowerCamelCase) / diff_function(
lowerCamelCase)
else:
raise ZeroDivisionError("""Could not find root""") from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess) < precision:
return next_guess
A_ : Union[str, Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
    # Find the fourth root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 1 |
'''simple docstring'''
import inspect
import logging
import os
import random
import shutil
import tempfile
import unittest
import pytest
import torch
from torch import nn
from torch.utils.data import DataLoader, TensorDataset
from accelerate import Accelerator
from accelerate.test_utils import execute_subprocess_async, require_cuda
from accelerate.utils import ProjectConfiguration, set_seed
__magic_name__ = logging.getLogger(__name__)
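# Build train/valid DataLoaders over a toy linear-regression dataset
# y = a*x + b + 0.1 * noise, used to exercise checkpoint save/restore below.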
def lowerCamelCase ( lowerCamelCase : Tuple=2 , lowerCamelCase : Tuple=3 , lowerCamelCase : Tuple=16 , lowerCamelCase : int = 10 , lowerCamelCase : int = 2):
def get_dataset(lowerCamelCase : Optional[int]):
A_ : Tuple = torch.randn(batch_size * n_batches , 1)
return TensorDataset(lowerCamelCase , a * x + b + 0.1 * torch.randn(batch_size * n_batches , 1))
A_ : str = get_dataset(lowerCamelCase)
A_ : Optional[Any] = get_dataset(lowerCamelCase)
A_ : List[Any] = DataLoader(lowerCamelCase , shuffle=lowerCamelCase , batch_size=lowerCamelCase , num_workers=4)
A_ : Optional[Any] = DataLoader(lowerCamelCase , shuffle=lowerCamelCase , batch_size=lowerCamelCase , num_workers=4)
return (train_dataloader, valid_dataloader)
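# Minimal training loop: one MSE step per batch, appending a random draw per
# batch so RNG state can be compared across checkpoint save/load cycles.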
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Optional[int] , lowerCamelCase : Optional[int] , lowerCamelCase : Any , lowerCamelCase : Any , lowerCamelCase : str=None):
A_ : List[str] = []
for epoch in range(lowerCamelCase):
# Train quickly
model.train()
for batch in dataloader:
A_ , A_ : int = batch
A_ : Optional[int] = model(lowerCamelCase)
A_ : str = torch.nn.functional.mse_loss(lowerCamelCase , lowerCamelCase)
accelerator.backward(lowerCamelCase)
optimizer.step()
optimizer.zero_grad()
rands.append(random.random()) # Introduce some randomness
if scheduler is not None:
scheduler.step()
return rands
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ):
'''simple docstring'''
super().__init__()
A_ : Optional[Any] = nn.Parameter(torch.randn(1 ) )
A_ : List[str] = nn.Parameter(torch.randn(1 ) )
def _a ( self : List[Any] ,_a : int ):
'''simple docstring'''
return x * self.a + self.b
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ : Optional[Any] = DummyModel()
A_ : Optional[int] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
A_ , A_ : int = dummy_dataloaders()
A_ : int = ProjectConfiguration(total_limit=1 ,project_dir=_a ,automatic_checkpoint_naming=_a )
# Train baseline
A_ : Optional[Any] = Accelerator(project_config=_a )
A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
_a ,_a ,_a ,_a )
# Save initial
accelerator.save_state()
# Save second state
accelerator.save_state()
self.assertEqual(len(os.listdir(accelerator.project_dir ) ) ,1 )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ : Optional[Any] = DummyModel()
A_ : Dict = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
A_ , A_ : Optional[Any] = dummy_dataloaders()
# Train baseline
A_ : List[Any] = Accelerator()
A_ , A_ , A_ , A_ : Dict = accelerator.prepare(
_a ,_a ,_a ,_a )
# Save initial
A_ : Dict = os.path.join(_a ,"""initial""" )
accelerator.save_state(_a )
((A_) , (A_)) : Optional[Any] = model.a.item(), model.b.item()
A_ : Tuple = optimizer.state_dict()
A_ : List[str] = train(3 ,_a ,_a ,_a ,_a )
((A_) , (A_)) : Union[str, Any] = model.a.item(), model.b.item()
A_ : Optional[int] = optimizer.state_dict()
# Train partially
set_seed(42 )
A_ : Optional[Any] = DummyModel()
A_ : List[Any] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
A_ , A_ : Tuple = dummy_dataloaders()
A_ : Optional[Any] = Accelerator()
A_ , A_ , A_ , A_ : Optional[int] = accelerator.prepare(
_a ,_a ,_a ,_a )
accelerator.load_state(_a )
((A_) , (A_)) : Any = model.a.item(), model.b.item()
A_ : str = optimizer.state_dict()
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
A_ : Tuple = train(2 ,_a ,_a ,_a ,_a )
# Save everything
A_ : Any = os.path.join(_a ,"""checkpoint""" )
accelerator.save_state(_a )
# Load everything back in and make sure all states work
accelerator.load_state(_a )
test_rands += train(1 ,_a ,_a ,_a ,_a )
((A_) , (A_)) : List[str] = model.a.item(), model.b.item()
A_ : Optional[Any] = optimizer.state_dict()
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
def _a ( self : Dict ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ : List[str] = DummyModel()
A_ : List[str] = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
A_ , A_ : Dict = dummy_dataloaders()
A_ : Dict = ProjectConfiguration(automatic_checkpoint_naming=_a )
# Train baseline
A_ : int = Accelerator(project_dir=_a ,project_config=_a )
A_ , A_ , A_ , A_ : Tuple = accelerator.prepare(
_a ,_a ,_a ,_a )
# Save initial
accelerator.save_state()
((A_) , (A_)) : Tuple = model.a.item(), model.b.item()
A_ : Union[str, Any] = optimizer.state_dict()
A_ : Tuple = train(3 ,_a ,_a ,_a ,_a )
((A_) , (A_)) : Any = model.a.item(), model.b.item()
A_ : List[Any] = optimizer.state_dict()
# Train partially
set_seed(42 )
A_ : Any = DummyModel()
A_ : Tuple = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
A_ , A_ : Any = dummy_dataloaders()
A_ : Union[str, Any] = ProjectConfiguration(iteration=1 ,automatic_checkpoint_naming=_a )
A_ : List[Any] = Accelerator(project_dir=_a ,project_config=_a )
A_ , A_ , A_ , A_ : Union[str, Any] = accelerator.prepare(
_a ,_a ,_a ,_a )
accelerator.load_state(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_0""" ) )
((A_) , (A_)) : Optional[int] = model.a.item(), model.b.item()
A_ : List[str] = optimizer.state_dict()
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
A_ : Optional[int] = train(2 ,_a ,_a ,_a ,_a )
# Save everything
accelerator.save_state()
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_1""" ) )
test_rands += train(1 ,_a ,_a ,_a ,_a )
((A_) , (A_)) : List[Any] = model.a.item(), model.b.item()
A_ : Optional[int] = optimizer.state_dict()
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
self.assertEqual(_a ,_a )
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[int] = torch.tensor([1, 2, 3] )
A_ : str = torch.tensor([2, 3, 4] )
A_ : str = DummyModel()
A_ : Tuple = torch.optim.Adam(net.parameters() )
A_ : List[str] = Accelerator()
with self.assertRaises(_a ) as ve:
accelerator.register_for_checkpointing(_a ,_a ,_a ,_a )
A_ : Union[str, Any] = str(ve.exception )
self.assertTrue("""Item at index 0""" in message )
self.assertTrue("""Item at index 1""" in message )
self.assertFalse("""Item at index 2""" in message )
self.assertFalse("""Item at index 3""" in message )
def _a ( self : Tuple ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ : List[str] = DummyModel()
A_ : Any = torch.optim.Adam(params=model.parameters() ,lr=1e-3 )
A_ : str = torch.optim.lr_scheduler.StepLR(_a ,step_size=1 ,gamma=0.99 )
A_ , A_ : List[str] = dummy_dataloaders()
A_ : Union[str, Any] = ProjectConfiguration(automatic_checkpoint_naming=_a )
# Train baseline
A_ : List[Any] = Accelerator(project_dir=_a ,project_config=_a )
A_ , A_ , A_ , A_ , A_ : str = accelerator.prepare(
_a ,_a ,_a ,_a ,_a )
# Save initial
accelerator.save_state()
A_ : List[Any] = scheduler.state_dict()
train(3 ,_a ,_a ,_a ,_a ,_a )
self.assertNotEqual(_a ,scheduler.state_dict() )
# Load everything back in and make sure all states work
accelerator.load_state(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_0""" ) )
self.assertEqual(_a ,scheduler.state_dict() )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
with tempfile.TemporaryDirectory() as tmpdir:
set_seed(42 )
A_ : Any = DummyModel()
A_ : str = ProjectConfiguration(automatic_checkpoint_naming=_a ,total_limit=2 )
# Train baseline
A_ : List[str] = Accelerator(project_dir=_a ,project_config=_a )
A_ : List[str] = accelerator.prepare(_a )
# Save 3 states:
for _ in range(11 ):
accelerator.save_state()
self.assertTrue(not os.path.exists(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_0""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_9""" ) ) )
self.assertTrue(os.path.exists(os.path.join(_a ,"""checkpoints""" ,"""checkpoint_10""" ) ) )
@require_cuda
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = ["""torchrun""", f'--nproc_per_node={torch.cuda.device_count()}', inspect.getfile(self.__class__ )]
execute_subprocess_async(_a ,env=os.environ.copy() )
if __name__ == "__main__":
__magic_name__ = '/tmp/accelerate/state_checkpointing'
__magic_name__ = DummyModel()
__magic_name__ = torch.optim.Adam(params=model.parameters(), lr=1e-3)
__magic_name__ = torch.optim.lr_scheduler.StepLR(optimizer, step_size=1, gamma=0.9_9)
__magic_name__ , __magic_name__ = dummy_dataloaders()
__magic_name__ = ProjectConfiguration(automatic_checkpoint_naming=True)
# Train baseline
__magic_name__ = Accelerator(project_dir=savedir, project_config=project_config, mixed_precision='no')
if accelerator.process_index == 0:
if os.path.exists(savedir):
shutil.rmtree(savedir)
os.makedirs(savedir)
__magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ , __magic_name__ = accelerator.prepare(
model, optimizer, train_dataloader, valid_dataloader, scheduler
)
__magic_name__ , __magic_name__ = accelerator.prepare(model, optimizer)
train(3, model, train_dataloader, optimizer, accelerator, scheduler)
    # Check that the initial optimizer is loaded on the GPU
for group in optimizer.param_groups:
__magic_name__ = group['params'][0].device
break
assert param_device.type == accelerator.device.type
__magic_name__ = model.cpu()
accelerator.wait_for_everyone()
accelerator.save_state()
accelerator.wait_for_everyone()
# Check CPU state
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='cpu')
for group in optimizer.param_groups:
__magic_name__ = group['params'][0].device
break
assert (
param_device.type == torch.device('cpu').type
), f"Loaded optimizer states did not match, expected to be loaded on the CPU but got {param_device}"
# Check device state
model.to(accelerator.device)
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='on_device')
for group in optimizer.param_groups:
__magic_name__ = group['params'][0].device
break
assert (
param_device.type == accelerator.device.type
), f"Loaded optimizer states did not match, expected to be loaded on {accelerator.device} but got {param_device}"
# Check error
with pytest.raises(TypeError, match='Unsupported optimizer map location passed'):
accelerator.load_state(os.path.join(savedir, 'checkpoints', 'checkpoint_0'), map_location='invalid')
accelerator.wait_for_everyone()
if accelerator.process_index == 0:
shutil.rmtree(savedir)
accelerator.wait_for_everyone()
| 27 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
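# Maps the requested number of image embeddings to the (rows, cols) grid used
# by adaptive average pooling, e.g. 4 -> a 2x2 grid of pooled regions.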
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : Dict ):
'''simple docstring'''
super().__init__()
A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a )
A_ : int = list(model.children() )[:-2]
A_ : int = nn.Sequential(*_a )
A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : str ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.pool(self.model(_a ) )
A_ : Any = torch.flatten(_a ,start_dim=2 )
A_ : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = [json.loads(_a ) for l in open(_a )]
A_ : Optional[int] = os.path.dirname(_a )
A_ : Optional[Any] = tokenizer
A_ : Optional[Any] = labels
A_ : List[Any] = len(_a )
A_ : str = max_seq_length
A_ : str = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
A_ : Optional[int] = sentence[: self.max_seq_length]
A_ : Any = torch.zeros(self.n_classes )
A_ : Tuple = 1
A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
A_ : Union[str, Any] = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
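# Collate variable-length examples: copy each tokenized sentence into a
# zero-padded tensor, mark real tokens in the mask, and stack the image,
# label, and image-boundary-token tensors.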
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
A_ : str = input_row["""sentence"""]
A_ : Tuple = 1
A_ : int = torch.stack([row["""image"""] for row in batch])
A_ : str = torch.stack([row["""label"""] for row in batch])
A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
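# Sieve of Eratosthenes: returns all primes <= num in O(n log log n) time.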
def lowerCamelCase ( lowerCamelCase : int):
if num <= 0:
A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(lowerCamelCase)
A_ : str = [True] * (num + 1)
A_ : Tuple = []
A_ : str = 2
A_ : Any = int(math.sqrt(lowerCamelCase))
while start <= end:
# If start is a prime
        if sieve[start]:
prime.append(lowerCamelCase)
            # Set multiples of start to False
for i in range(start * start , num + 1 , lowerCamelCase):
                if sieve[i]:
A_ : Union[str, Any] = False
start += 1
for j in range(end + 1 , num + 1):
        if sieve[j]:
prime.append(lowerCamelCase)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 27 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from PIL import Image
from transformers import CLIPTextConfig, CLIPTextModel, CLIPTokenizer
from diffusers import AutoencoderKL, DDIMScheduler, DDPMScheduler, StableDiffusionUpscalePipeline, UNetaDConditionModel
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
enable_full_determinism()
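# Fast unit tests for StableDiffusionUpscalePipeline built from tiny,
# randomly initialized UNet/VAE/CLIP components (seeded for determinism).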
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : int ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
@property
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Any = 1
A_ : int = 3
A_ : List[Any] = (32, 32)
A_ : Optional[Any] = floats_tensor((batch_size, num_channels) + sizes ,rng=random.Random(0 ) ).to(_a )
return image
@property
def _a ( self : str ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = UNetaDConditionModel(
block_out_channels=(32, 32, 64) ,layers_per_block=2 ,sample_size=32 ,in_channels=7 ,out_channels=4 ,down_block_types=("""DownBlock2D""", """CrossAttnDownBlock2D""", """CrossAttnDownBlock2D""") ,up_block_types=("""CrossAttnUpBlock2D""", """CrossAttnUpBlock2D""", """UpBlock2D""") ,cross_attention_dim=32 ,attention_head_dim=8 ,use_linear_projection=_a ,only_cross_attention=(True, True, False) ,num_class_embeds=100 ,)
return model
@property
def _a ( self : Dict ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : str = AutoencoderKL(
block_out_channels=[32, 32, 64] ,in_channels=3 ,out_channels=3 ,down_block_types=["""DownEncoderBlock2D""", """DownEncoderBlock2D""", """DownEncoderBlock2D"""] ,up_block_types=["""UpDecoderBlock2D""", """UpDecoderBlock2D""", """UpDecoderBlock2D"""] ,latent_channels=4 ,)
return model
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : Union[str, Any] = CLIPTextConfig(
bos_token_id=0 ,eos_token_id=2 ,hidden_size=32 ,intermediate_size=37 ,layer_norm_eps=1e-05 ,num_attention_heads=4 ,num_hidden_layers=5 ,pad_token_id=1 ,vocab_size=1000 ,hidden_act="""gelu""" ,projection_dim=512 ,)
return CLIPTextModel(_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Optional[int] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : List[str] = self.dummy_cond_unet_upscale
A_ : int = DDPMScheduler()
A_ : Optional[int] = DDIMScheduler(prediction_type="""v_prediction""" )
A_ : List[str] = self.dummy_vae
A_ : List[Any] = self.dummy_text_encoder
A_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : List[str] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Dict = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components
A_ : Optional[int] = StableDiffusionUpscalePipeline(
unet=_a ,low_res_scheduler=_a ,scheduler=_a ,vae=_a ,text_encoder=_a ,tokenizer=_a ,max_noise_level=350 ,)
A_ : Tuple = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = """A painting of a squirrel eating a burger"""
A_ : Any = torch.Generator(device=_a ).manual_seed(0 )
A_ : Optional[Any] = sd_pipe(
[prompt] ,image=_a ,generator=_a ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
A_ : List[str] = output.images
A_ : str = torch.Generator(device=_a ).manual_seed(0 )
A_ : Union[str, Any] = sd_pipe(
[prompt] ,image=_a ,generator=_a ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,return_dict=_a ,)[0]
A_ : Union[str, Any] = image[0, -3:, -3:, -1]
A_ : Dict = image_from_tuple[0, -3:, -3:, -1]
A_ : Union[str, Any] = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
A_ : List[str] = np.array([0.3113, 0.3910, 0.4272, 0.4859, 0.5061, 0.4652, 0.5362, 0.5715, 0.5661] )
assert np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
assert np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
def _a ( self : Tuple ):
'''simple docstring'''
A_ : List[str] = """cpu""" # ensure determinism for the device-dependent torch.Generator
A_ : Optional[Any] = self.dummy_cond_unet_upscale
A_ : Tuple = DDPMScheduler()
A_ : List[Any] = DDIMScheduler(prediction_type="""v_prediction""" )
A_ : int = self.dummy_vae
A_ : str = self.dummy_text_encoder
A_ : Optional[Any] = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : List[str] = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Optional[Any] = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
        # assemble the upscale pipeline from the dummy components
A_ : List[str] = StableDiffusionUpscalePipeline(
unet=_a ,low_res_scheduler=_a ,scheduler=_a ,vae=_a ,text_encoder=_a ,tokenizer=_a ,max_noise_level=350 ,)
A_ : Dict = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : List[Any] = """A painting of a squirrel eating a burger"""
A_ : Union[str, Any] = sd_pipe(
2 * [prompt] ,image=2 * [low_res_image] ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
A_ : Tuple = output.images
assert image.shape[0] == 2
A_ : Tuple = torch.Generator(device=_a ).manual_seed(0 )
A_ : List[str] = sd_pipe(
[prompt] ,image=_a ,generator=_a ,num_images_per_prompt=2 ,guidance_scale=6.0 ,noise_level=20 ,num_inference_steps=2 ,output_type="""np""" ,)
A_ : Any = output.images
assert image.shape[0] == 2
@unittest.skipIf(torch_device != """cuda""" ,"""This test requires a GPU""" )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Any = self.dummy_cond_unet_upscale
A_ : Tuple = DDPMScheduler()
A_ : Tuple = DDIMScheduler(prediction_type="""v_prediction""" )
A_ : Optional[Any] = self.dummy_vae
A_ : List[str] = self.dummy_text_encoder
A_ : Tuple = CLIPTokenizer.from_pretrained("""hf-internal-testing/tiny-random-clip""" )
A_ : Tuple = self.dummy_image.cpu().permute(0 ,2 ,3 ,1 )[0]
A_ : Tuple = Image.fromarray(np.uinta(_a ) ).convert("""RGB""" ).resize((64, 64) )
# put models in fp16, except vae as it overflows in fp16
A_ : Optional[Any] = unet.half()
A_ : Tuple = text_encoder.half()
        # assemble the upscale pipeline from the dummy components
A_ : List[Any] = StableDiffusionUpscalePipeline(
unet=_a ,low_res_scheduler=_a ,scheduler=_a ,vae=_a ,text_encoder=_a ,tokenizer=_a ,max_noise_level=350 ,)
A_ : str = sd_pipe.to(_a )
sd_pipe.set_progress_bar_config(disable=_a )
A_ : Optional[int] = """A painting of a squirrel eating a burger"""
A_ : List[Any] = torch.manual_seed(0 )
A_ : str = sd_pipe(
[prompt] ,image=_a ,generator=_a ,num_inference_steps=2 ,output_type="""np""" ,).images
A_ : str = low_res_image.size[0] * 4
assert image.shape == (1, expected_height_width, expected_height_width, 3)
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : str ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Dict ):
'''simple docstring'''
A_ : Any = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat.npy""" )
A_ : List[str] = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : List[str] = StableDiffusionUpscalePipeline.from_pretrained(_a )
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
A_ : Tuple = """a cat sitting on a park bench"""
A_ : int = torch.manual_seed(0 )
A_ : int = pipe(
prompt=_a ,image=_a ,generator=_a ,output_type="""np""" ,)
A_ : Any = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 1e-3
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A_ : List[Any] = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/sd2-upscale"""
"""/upsampled_cat_fp16.npy""" )
A_ : Tuple = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : int = StableDiffusionUpscalePipeline.from_pretrained(
_a ,torch_dtype=torch.floataa ,)
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing()
A_ : Any = """a cat sitting on a park bench"""
A_ : Union[str, Any] = torch.manual_seed(0 )
A_ : List[str] = pipe(
prompt=_a ,image=_a ,generator=_a ,output_type="""np""" ,)
A_ : Optional[int] = output.images[0]
assert image.shape == (512, 512, 3)
assert np.abs(expected_image - image ).max() < 5e-1
def _a ( self : Optional[Any] ):
'''simple docstring'''
torch.cuda.empty_cache()
torch.cuda.reset_max_memory_allocated()
torch.cuda.reset_peak_memory_stats()
A_ : Dict = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/sd2-upscale/low_res_cat.png""" )
A_ : Any = """stabilityai/stable-diffusion-x4-upscaler"""
A_ : Tuple = StableDiffusionUpscalePipeline.from_pretrained(
_a ,torch_dtype=torch.floataa ,)
pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
pipe.enable_attention_slicing(1 )
pipe.enable_sequential_cpu_offload()
A_ : Tuple = """a cat sitting on a park bench"""
A_ : Any = torch.manual_seed(0 )
A_ : Optional[Any] = pipe(
prompt=_a ,image=_a ,generator=_a ,num_inference_steps=5 ,output_type="""np""" ,)
A_ : List[Any] = torch.cuda.max_memory_allocated()
# make sure that less than 2.9 GB is allocated
assert mem_bytes < 2.9 * 10**9
| 27 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of processes to use for preprocessing.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__magic_name__ = 'temp_engine/bert-fp16.engine'
if args.inta:
__magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
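# Build the engine in explicit-batch mode: the ONNX network carries the batch
# dimension itself instead of TensorRT managing an implicit one.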
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
__magic_name__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
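# One TensorRT inference step: copy host inputs to the device, launch the
# engine asynchronously on a CUDA stream, copy the start/end logits back,
# synchronize, and return the outputs together with the measured latency.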
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase)
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
__magic_name__ = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
__magic_name__ = raw_datasets['validation'].column_names
__magic_name__ = 'question' if 'question' in column_names else column_names[0]
__magic_name__ = 'context' if 'context' in column_names else column_names[1]
__magic_name__ = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
__magic_name__ = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
__magic_name__ = min(args.max_seq_length, tokenizer.model_max_length)
def lowerCamelCase ( lowerCamelCase : Dict):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
A_ : List[Any] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
A_ : Optional[int] = tokenizer(
examples[question_column_name if pad_on_right else context_column_name] , examples[context_column_name if pad_on_right else question_column_name] , truncation="""only_second""" if pad_on_right else """only_first""" , max_length=lowerCamelCase , stride=args.doc_stride , return_overflowing_tokens=lowerCamelCase , return_offsets_mapping=lowerCamelCase , padding="""max_length""" , )
# Since one example might give us several features if it has a long context, we need a map from a feature to
# its corresponding example. This key gives us just that.
A_ : List[str] = tokenized_examples.pop("""overflow_to_sample_mapping""")
# For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
# corresponding example_id and we will store the offset mappings.
A_ : Union[str, Any] = []
for i in range(len(tokenized_examples["""input_ids"""])):
# Grab the sequence corresponding to that example (to know what is the context and what is the question).
A_ : Any = tokenized_examples.sequence_ids(lowerCamelCase)
A_ : Tuple = 1 if pad_on_right else 0
        # One example can give several spans; this is the index of the example containing this span of text.
A_ : Union[str, Any] = sample_mapping[i]
tokenized_examples["example_id"].append(examples["""id"""][sample_index])
        # Set offset_mapping entries that are not part of the context to None, so it is
        # easy to determine whether a token position is part of the context or not.
A_ : Dict = [
(o if sequence_ids[k] == context_index else None)
for k, o in enumerate(tokenized_examples["""offset_mapping"""][i])
]
return tokenized_examples
__magic_name__ = raw_datasets['validation']
# Validation Feature Creation
__magic_name__ = eval_examples.map(
prepare_validation_features,
batched=True,
num_proc=args.preprocessing_num_workers,
remove_columns=column_names,
load_from_cache_file=not args.overwrite_cache,
desc='Running tokenizer on validation dataset',
)
__magic_name__ = default_data_collator
__magic_name__ = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
__magic_name__ = DataLoader(
eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Union[str, Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any]="eval"):
# Post-processing: we match the start logits and end logits to answers in the original context.
A_ : Tuple = postprocess_qa_predictions(
examples=lowerCamelCase , features=lowerCamelCase , predictions=lowerCamelCase , version_2_with_negative=args.version_2_with_negative , n_best_size=args.n_best_size , max_answer_length=args.max_answer_length , null_score_diff_threshold=args.null_score_diff_threshold , output_dir=args.output_dir , prefix=lowerCamelCase , )
# Format the result to the format the metric expects.
if args.version_2_with_negative:
A_ : Dict = [
{"""id""": k, """prediction_text""": v, """no_answer_probability""": 0.0} for k, v in predictions.items()
]
else:
A_ : Union[str, Any] = [{"""id""": k, """prediction_text""": v} for k, v in predictions.items()]
A_ : Any = [{"""id""": ex["""id"""], """answers""": ex[answer_column_name]} for ex in examples]
return EvalPrediction(predictions=lowerCamelCase , label_ids=lowerCamelCase)
__magic_name__ = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # set up for TensorRT inference
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
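    # Bytes required for one binding: volume of its shape times the dtype's item size.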
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return trt.volume(engine.get_binding_shape(lowerCamelCase)) * engine.get_binding_dtype(lowerCamelCase).itemsize
# Allocate device memory for inputs and outputs.
__magic_name__ = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
# Allocate output buffer
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.floataa)
__magic_name__ = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.floataa)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
__magic_name__ = cuda.mem_alloc(h_outputa.nbytes)
# Create a stream in which to copy inputs/outputs and run inference.
__magic_name__ = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
__magic_name__ = 0.0
__magic_name__ = 0
__magic_name__ = timeit.default_timer()
__magic_name__ = None
for step, batch in enumerate(eval_dataloader):
__magic_name__ , __magic_name__ = model_infer(batch, context, d_inputs, h_outputa, h_outputa, d_outputa, d_outputa, stream)
total_time += infer_time
niter += 1
__magic_name__ , __magic_name__ = outputs
__magic_name__ = torch.tensor(start_logits)
__magic_name__ = torch.tensor(end_logits)
        # pad predictions and labels so they can be gathered across processes
__magic_name__ = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
__magic_name__ = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
__magic_name__ = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
__magic_name__ = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
if all_preds is not None:
__magic_name__ = nested_truncate(all_preds, len(eval_dataset))
__magic_name__ = timeit.default_timer() - start_time
logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
# Inference time from TRT
logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
logger.info('Total Number of Inference = %d', niter)
__magic_name__ = post_processing_function(eval_examples, eval_dataset, all_preds)
__magic_name__ = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 1 |
'''simple docstring'''
import os
import pytest
from attr import dataclass
__magic_name__ = 'us-east-1'  # default region
@dataclass
class __lowerCAmelCase :
'''simple docstring'''
a_ = 42
a_ = """arn:aws:iam::558105141721:role/sagemaker_execution_role"""
a_ = {
"""task_name""": """mnli""",
"""per_device_train_batch_size""": 16,
"""per_device_eval_batch_size""": 16,
"""do_train""": True,
"""do_eval""": True,
"""do_predict""": True,
"""output_dir""": """/opt/ml/model""",
"""overwrite_output_dir""": True,
"""max_steps""": 500,
"""save_steps""": 5_500,
}
a_ = {**hyperparameters, """max_steps""": 1_000}
@property
def _a ( self : int ):
'''simple docstring'''
if self.framework == "pytorch":
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"eval_accuracy.*=\D*(.*?)$"},
{"Name": "eval_loss", "Regex": r"eval_loss.*=\D*(.*?)$"},
]
else:
return [
{"Name": "train_runtime", "Regex": r"train_runtime.*=\D*(.*?)$"},
{"Name": "eval_accuracy", "Regex": r"loss.*=\D*(.*?)]?$"},
{"Name": "eval_loss", "Regex": r"sparse_categorical_accuracy.*=\D*(.*?)]?$"},
]
@property
def _a ( self : Any ):
'''simple docstring'''
        return f'{self.framework}-transformers-test'
@property
def _a ( self : Any ):
'''simple docstring'''
return f'./tests/sagemaker/scripts/{self.framework}'
@property
def _a ( self : List[Any] ):
'''simple docstring'''
if self.framework == "pytorch":
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-pytorch-training:1.7.1-transformers4.6.1-gpu-py36-cu110-ubuntu18.04"
else:
return "763104351884.dkr.ecr.us-east-1.amazonaws.com/huggingface-tensorflow-training:2.4.1-transformers4.6.1-gpu-py37-cu110-ubuntu18.04"
@pytest.fixture(scope="""class""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
A_ : Dict = SageMakerTestEnvironment(framework=request.cls.framework)
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 27 | 1 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import ViTConfig, ViTForImageClassification, ViTImageProcessor, ViTModel
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
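# Build (old_name, new_name) pairs that translate timm/DINO parameter names
# into the Hugging Face ViT naming scheme.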
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any]=False):
A_ : Optional[int] = []
for i in range(config.num_hidden_layers):
# encoder layers: output projection, 2 feedforward neural networks and 2 layernorms
rename_keys.append((F'blocks.{i}.norm1.weight', F'vit.encoder.layer.{i}.layernorm_before.weight'))
rename_keys.append((F'blocks.{i}.norm1.bias', F'vit.encoder.layer.{i}.layernorm_before.bias'))
rename_keys.append((F'blocks.{i}.attn.proj.weight', F'vit.encoder.layer.{i}.attention.output.dense.weight'))
rename_keys.append((F'blocks.{i}.attn.proj.bias', F'vit.encoder.layer.{i}.attention.output.dense.bias'))
rename_keys.append((F'blocks.{i}.norm2.weight', F'vit.encoder.layer.{i}.layernorm_after.weight'))
rename_keys.append((F'blocks.{i}.norm2.bias', F'vit.encoder.layer.{i}.layernorm_after.bias'))
rename_keys.append((F'blocks.{i}.mlp.fc1.weight', F'vit.encoder.layer.{i}.intermediate.dense.weight'))
rename_keys.append((F'blocks.{i}.mlp.fc1.bias', F'vit.encoder.layer.{i}.intermediate.dense.bias'))
rename_keys.append((F'blocks.{i}.mlp.fc2.weight', F'vit.encoder.layer.{i}.output.dense.weight'))
rename_keys.append((F'blocks.{i}.mlp.fc2.bias', F'vit.encoder.layer.{i}.output.dense.bias'))
# projection layer + position embeddings
rename_keys.extend(
[
("""cls_token""", """vit.embeddings.cls_token"""),
("""patch_embed.proj.weight""", """vit.embeddings.patch_embeddings.projection.weight"""),
("""patch_embed.proj.bias""", """vit.embeddings.patch_embeddings.projection.bias"""),
("""pos_embed""", """vit.embeddings.position_embeddings"""),
])
if base_model:
# layernorm + pooler
rename_keys.extend(
[
("""norm.weight""", """layernorm.weight"""),
("""norm.bias""", """layernorm.bias"""),
])
# if just the base model, we should remove "vit" from all keys that start with "vit"
A_ : int = [(pair[0], pair[1][4:]) if pair[1].startswith("""vit""") else pair for pair in rename_keys]
else:
# layernorm + classification head
rename_keys.extend(
[
("""norm.weight""", """vit.layernorm.weight"""),
("""norm.bias""", """vit.layernorm.bias"""),
("""head.weight""", """classifier.weight"""),
("""head.bias""", """classifier.bias"""),
])
return rename_keys
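# Split timm's fused qkv projection into the separate query/key/value weights
# and biases expected by the Hugging Face ViT attention layers.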
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : List[Any]=False):
for i in range(config.num_hidden_layers):
if base_model:
A_ : str = """"""
else:
A_ : Any = """vit."""
# read in weights + bias of input projection layer (in timm, this is a single matrix + bias)
A_ : Optional[int] = state_dict.pop(F'blocks.{i}.attn.qkv.weight')
A_ : Tuple = state_dict.pop(F'blocks.{i}.attn.qkv.bias')
# next, add query, keys and values (in that order) to the state dict
A_ : List[Any] = in_proj_weight[
: config.hidden_size, :
]
A_ : str = in_proj_bias[: config.hidden_size]
A_ : Tuple = in_proj_weight[
config.hidden_size : config.hidden_size * 2, :
]
A_ : Tuple = in_proj_bias[
config.hidden_size : config.hidden_size * 2
]
A_ : Union[str, Any] = in_proj_weight[
-config.hidden_size :, :
]
A_ : Tuple = in_proj_bias[-config.hidden_size :]
def lowerCamelCase ( lowerCamelCase : int):
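    # drop the classification head weights, which are not part of the bare ViT backbone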
A_ : Union[str, Any] = ["""head.weight""", """head.bias"""]
for k in ignore_keys:
state_dict.pop(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : int):
A_ : List[str] = dct.pop(lowerCamelCase)
A_ : Tuple = val
def lowerCamelCase ( ):
A_ : Any = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Optional[Any] = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : List[Any] , lowerCamelCase : Union[str, Any]=True):
A_ : Any = ViTConfig()
# patch_size
if model_name[-1] == "8":
A_ : Tuple = 8
# set labels if required
if not base_model:
A_ : str = 1000
A_ : Any = """huggingface/label-files"""
A_ : Optional[int] = """imagenet-1k-id2label.json"""
A_ : Dict = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : Any = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : List[str] = idalabel
A_ : int = {v: k for k, v in idalabel.items()}
# size of the architecture
if model_name in ["dino_vits8", "dino_vits16"]:
A_ : Any = 384
A_ : Tuple = 1536
A_ : Union[str, Any] = 12
A_ : Union[str, Any] = 6
# load original model from torch hub
A_ : Tuple = torch.hub.load("""facebookresearch/dino:main""" , lowerCamelCase)
original_model.eval()
# load state_dict of original model, remove and rename some keys
A_ : Tuple = original_model.state_dict()
if base_model:
remove_classification_head_(lowerCamelCase)
A_ : Optional[Any] = create_rename_keys(lowerCamelCase , base_model=lowerCamelCase)
for src, dest in rename_keys:
rename_key(lowerCamelCase , lowerCamelCase , lowerCamelCase)
read_in_q_k_v(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# load HuggingFace model
if base_model:
A_ : Tuple = ViTModel(lowerCamelCase , add_pooling_layer=lowerCamelCase).eval()
else:
A_ : Union[str, Any] = ViTForImageClassification(lowerCamelCase).eval()
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by ViTImageProcessor
A_ : Tuple = ViTImageProcessor()
A_ : str = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = encoding["""pixel_values"""]
A_ : Tuple = model(lowerCamelCase)
if base_model:
A_ : str = original_model(lowerCamelCase)
assert torch.allclose(lowerCamelCase , outputs.last_hidden_state[:, 0, :] , atol=1E-1)
else:
A_ : int = original_model(lowerCamelCase)
assert logits.shape == outputs.logits.shape
assert torch.allclose(lowerCamelCase , outputs.logits , atol=1E-3)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {model_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--model_name',
default='dino_vitb16',
type=str,
help='Name of the model trained with DINO you\'d like to convert.',
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--base_model',
action='store_true',
help='Whether to only convert the base model (no projection head weights).',
)
parser.set_defaults(base_model=True)
__magic_name__ = parser.parse_args()
convert_vit_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.base_model)
| 27 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
            logger.info("""vision_config is None. Initializing the OwlViTVisionConfig with default values.""" )
A_ : Dict = OwlViTTextConfig(**_a )
A_ : Dict = OwlViTVisionConfig(**_a )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
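        # compose a full OwlViT config from separate text and vision config dicts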
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
return cls.from_dict(_a ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
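        # create dummy text inputs with the tokenizer and dummy pixel inputs with the image processor, then merge them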
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
| 27 | 1 |
'''simple docstring'''
import argparse
import os
import torch
from diffusers import (
CMStochasticIterativeScheduler,
ConsistencyModelPipeline,
UNetaDModel,
)
__magic_name__ = {
'sample_size': 32,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': 1_000,
'block_out_channels': [32, 64],
'attention_head_dim': 8,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__magic_name__ = {
'sample_size': 64,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 3,
'num_class_embeds': 1_000,
'block_out_channels': [192, 192 * 2, 192 * 3, 192 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'scale_shift',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__magic_name__ = {
'sample_size': 256,
'in_channels': 3,
'out_channels': 3,
'layers_per_block': 2,
'num_class_embeds': None,
'block_out_channels': [256, 256, 256 * 2, 256 * 2, 256 * 4, 256 * 4],
'attention_head_dim': 64,
'down_block_types': [
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'ResnetDownsampleBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
'AttnDownBlock2D',
],
'up_block_types': [
'AttnUpBlock2D',
'AttnUpBlock2D',
'AttnUpBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
'ResnetUpsampleBlock2D',
],
'resnet_time_scale_shift': 'default',
'upsample_type': 'resnet',
'downsample_type': 'resnet',
}
__magic_name__ = {
'num_train_timesteps': 40,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
__magic_name__ = {
'num_train_timesteps': 201,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
__magic_name__ = {
'num_train_timesteps': 151,
'sigma_min': 0.0_0_2,
'sigma_max': 8_0.0,
}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
if isinstance(lowerCamelCase , lowerCamelCase):
return v
if v.lower() in ("yes", "true", "t", "y", "1"):
return True
elif v.lower() in ("no", "false", "f", "n", "0"):
return False
else:
raise argparse.ArgumentTypeError("""boolean value expected""")
def lowerCamelCase ( lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : Any , lowerCamelCase : Tuple=False):
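    # copy one ResNet block's weights from the original checkpoint into the diffusers naming scheme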
A_ : Dict = checkpoint[F'{old_prefix}.in_layers.0.weight']
A_ : Optional[int] = checkpoint[F'{old_prefix}.in_layers.0.bias']
A_ : Union[str, Any] = checkpoint[F'{old_prefix}.in_layers.2.weight']
A_ : Union[str, Any] = checkpoint[F'{old_prefix}.in_layers.2.bias']
A_ : List[str] = checkpoint[F'{old_prefix}.emb_layers.1.weight']
A_ : int = checkpoint[F'{old_prefix}.emb_layers.1.bias']
A_ : int = checkpoint[F'{old_prefix}.out_layers.0.weight']
A_ : Union[str, Any] = checkpoint[F'{old_prefix}.out_layers.0.bias']
A_ : Optional[Any] = checkpoint[F'{old_prefix}.out_layers.3.weight']
A_ : Optional[int] = checkpoint[F'{old_prefix}.out_layers.3.bias']
if has_skip:
A_ : str = checkpoint[F'{old_prefix}.skip_connection.weight']
A_ : List[str] = checkpoint[F'{old_prefix}.skip_connection.bias']
return new_checkpoint
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Optional[Any] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any]=None):
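    # split the fused qkv projection into separate q/k/v tensors and squeeze away the 1x1 conv dimensions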
A_ , A_ , A_ : Optional[Any] = checkpoint[F'{old_prefix}.qkv.weight'].chunk(3 , dim=0)
A_ , A_ , A_ : List[Any] = checkpoint[F'{old_prefix}.qkv.bias'].chunk(3 , dim=0)
A_ : List[Any] = checkpoint[F'{old_prefix}.norm.weight']
A_ : Tuple = checkpoint[F'{old_prefix}.norm.bias']
A_ : Union[str, Any] = weight_q.squeeze(-1).squeeze(-1)
A_ : Dict = bias_q.squeeze(-1).squeeze(-1)
A_ : int = weight_k.squeeze(-1).squeeze(-1)
A_ : Tuple = bias_k.squeeze(-1).squeeze(-1)
A_ : Optional[Any] = weight_v.squeeze(-1).squeeze(-1)
A_ : Dict = bias_v.squeeze(-1).squeeze(-1)
A_ : int = (
checkpoint[F'{old_prefix}.proj_out.weight'].squeeze(-1).squeeze(-1)
)
A_ : int = checkpoint[F'{old_prefix}.proj_out.bias'].squeeze(-1).squeeze(-1)
return new_checkpoint
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : Tuple):
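    # walk the original UNet state dict block by block and remap every parameter to diffusers names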
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
A_ : Optional[Any] = {}
A_ : List[Any] = checkpoint["""time_embed.0.weight"""]
A_ : Dict = checkpoint["""time_embed.0.bias"""]
A_ : Optional[Any] = checkpoint["""time_embed.2.weight"""]
A_ : Any = checkpoint["""time_embed.2.bias"""]
if unet_config["num_class_embeds"] is not None:
A_ : Union[str, Any] = checkpoint["""label_emb.weight"""]
A_ : Union[str, Any] = checkpoint["""input_blocks.0.0.weight"""]
A_ : List[str] = checkpoint["""input_blocks.0.0.bias"""]
A_ : Optional[int] = unet_config["""down_block_types"""]
A_ : Union[str, Any] = unet_config["""layers_per_block"""]
A_ : Optional[Any] = unet_config["""attention_head_dim"""]
A_ : Dict = unet_config["""block_out_channels"""]
A_ : Union[str, Any] = 1
A_ : Optional[int] = channels_list[0]
for i, layer_type in enumerate(lowerCamelCase):
A_ : Any = channels_list[i]
A_ : Tuple = current_channels != prev_channels
if layer_type == "ResnetDownsampleBlock2D":
for j in range(lowerCamelCase):
A_ : List[str] = F'down_blocks.{i}.resnets.{j}'
A_ : Optional[int] = F'input_blocks.{current_layer}.0'
A_ : Dict = True if j == 0 and downsample_block_has_skip else False
A_ : Dict = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase)
current_layer += 1
elif layer_type == "AttnDownBlock2D":
for j in range(lowerCamelCase):
A_ : Optional[Any] = F'down_blocks.{i}.resnets.{j}'
A_ : Optional[Any] = F'input_blocks.{current_layer}.0'
A_ : Optional[int] = True if j == 0 and downsample_block_has_skip else False
A_ : List[str] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase)
A_ : str = F'down_blocks.{i}.attentions.{j}'
A_ : List[str] = F'input_blocks.{current_layer}.1'
A_ : Optional[int] = convert_attention(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
current_layer += 1
if i != len(lowerCamelCase) - 1:
A_ : str = F'down_blocks.{i}.downsamplers.0'
A_ : List[str] = F'input_blocks.{current_layer}.0'
A_ : Tuple = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
current_layer += 1
A_ : str = current_channels
# hardcoded the mid-block for now
A_ : Optional[Any] = """mid_block.resnets.0"""
A_ : Dict = """middle_block.0"""
A_ : Any = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : int = """mid_block.attentions.0"""
A_ : Dict = """middle_block.1"""
A_ : Optional[int] = convert_attention(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : List[Any] = """mid_block.resnets.1"""
A_ : Dict = """middle_block.2"""
A_ : Optional[int] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : Optional[Any] = 0
A_ : int = unet_config["""up_block_types"""]
for i, layer_type in enumerate(lowerCamelCase):
if layer_type == "ResnetUpsampleBlock2D":
for j in range(layers_per_block + 1):
A_ : Tuple = F'up_blocks.{i}.resnets.{j}'
A_ : str = F'output_blocks.{current_layer}.0'
A_ : Tuple = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase)
current_layer += 1
if i != len(lowerCamelCase) - 1:
A_ : int = F'up_blocks.{i}.upsamplers.0'
A_ : Tuple = F'output_blocks.{current_layer-1}.1'
A_ : Tuple = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
elif layer_type == "AttnUpBlock2D":
for j in range(layers_per_block + 1):
A_ : Tuple = F'up_blocks.{i}.resnets.{j}'
A_ : Union[str, Any] = F'output_blocks.{current_layer}.0'
A_ : Union[str, Any] = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_skip=lowerCamelCase)
A_ : List[str] = F'up_blocks.{i}.attentions.{j}'
A_ : Optional[Any] = F'output_blocks.{current_layer}.1'
A_ : int = convert_attention(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
current_layer += 1
if i != len(lowerCamelCase) - 1:
A_ : str = F'up_blocks.{i}.upsamplers.0'
A_ : List[Any] = F'output_blocks.{current_layer-1}.2'
A_ : Dict = convert_resnet(lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
A_ : str = checkpoint["""out.0.weight"""]
A_ : Dict = checkpoint["""out.0.bias"""]
A_ : Optional[Any] = checkpoint["""out.2.weight"""]
A_ : int = checkpoint["""out.2.bias"""]
return new_checkpoint
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument('--unet_path', default=None, type=str, required=True, help='Path to the unet.pt to convert.')
parser.add_argument(
'--dump_path', default=None, type=str, required=True, help='Path to output the converted UNet model.'
)
parser.add_argument('--class_cond', default=True, type=str, help='Whether the model is class-conditional.')
__magic_name__ = parser.parse_args()
__magic_name__ = strabool(args.class_cond)
__magic_name__ = os.path.basename(args.unet_path)
print(f"""Checkpoint: {ckpt_name}""")
# Get U-Net config
if "imagenet64" in ckpt_name:
__magic_name__ = IMAGENET_64_UNET_CONFIG
elif "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__magic_name__ = LSUN_256_UNET_CONFIG
elif "test" in ckpt_name:
__magic_name__ = TEST_UNET_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
if not args.class_cond:
__magic_name__ = None
__magic_name__ = con_pt_to_diffuser(args.unet_path, unet_config)
__magic_name__ = UNetaDModel(**unet_config)
image_unet.load_state_dict(converted_unet_ckpt)
# Get scheduler config
if "cd" in ckpt_name or "test" in ckpt_name:
__magic_name__ = CD_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "imagenet64" in ckpt_name:
__magic_name__ = CT_IMAGENET_64_SCHEDULER_CONFIG
elif "ct" in ckpt_name and "256" in ckpt_name and (("bedroom" in ckpt_name) or ("cat" in ckpt_name)):
__magic_name__ = CT_LSUN_256_SCHEDULER_CONFIG
else:
raise ValueError(f"""Checkpoint type {ckpt_name} is not currently supported.""")
__magic_name__ = CMStochasticIterativeScheduler(**scheduler_config)
__magic_name__ = ConsistencyModelPipeline(unet=image_unet, scheduler=cm_scheduler)
consistency_model.save_pretrained(args.dump_path)
| 27 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
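        # split the valid chunk start positions into three parts and sample one chunk from each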
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
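        # also shrink the full spectrogram to the chunk length with bilinear interpolation and stack it with the three chunks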
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
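        # truncate long audio (random crop or 4-way fusion stack) and pad short audio (repeat / repeatpad / zero padding)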
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
                A_ : Tuple = max_length // self.hop_length + 1  # the +1 is related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
        # "repeat" is only an extra padding mode: the audio is tiled before the usual max_length padding is applied
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str):
A_ , A_ : List[Any] = set(lowerCamelCase), [start]
while stack:
A_ : Optional[Any] = stack.pop()
explored.add(lowerCamelCase)
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
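        # Overall complexity stays O(V + E), the same as BFS.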
for adj in reversed(graph[v]):
if adj not in explored:
stack.append(lowerCamelCase)
return explored
__magic_name__ = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ : str = config_and_inputs
A_ : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import AddedToken, PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'sentencepiece.bpe.model'}
__magic_name__ = {
'vocab_file': {
'moussaKam/mbarthez': 'https://huggingface.co/moussaKam/mbarthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez': 'https://huggingface.co/moussaKam/barthez/resolve/main/sentencepiece.bpe.model',
'moussaKam/barthez-orangesum-title': (
'https://huggingface.co/moussaKam/barthez-orangesum-title/resolve/main/sentencepiece.bpe.model'
),
},
}
__magic_name__ = {
'moussaKam/mbarthez': 1_024,
'moussaKam/barthez': 1_024,
'moussaKam/barthez-orangesum-title': 1_024,
}
__magic_name__ = '▁'
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : str ,_a : Tuple ,_a : List[Any]="<s>" ,_a : Dict="</s>" ,_a : Union[str, Any]="</s>" ,_a : Tuple="<s>" ,_a : int="<unk>" ,_a : Union[str, Any]="<pad>" ,_a : Any="<mask>" ,_a : Optional[Dict[str, Any]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[Any] = AddedToken(_a ,lstrip=_a ,rstrip=_a ) if isinstance(_a ,_a ) else mask_token
A_ : Any = {} if sp_model_kwargs is None else sp_model_kwargs
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,sep_token=_a ,cls_token=_a ,pad_token=_a ,mask_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : Any = vocab_file
A_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(str(_a ) )
A_ : str = {"""<s>""": 0, """<pad>""": 1, """</s>""": 2, """<unk>""": 3}
A_ : Optional[Any] = len(self.sp_model ) - 1
A_ : Optional[int] = {v: k for k, v in self.fairseq_tokens_to_ids.items()}
def _a ( self : Optional[Any] ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
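        # single sequence: <s> X </s> ; pair of sequences: <s> A </s></s> B </s>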
if token_ids_a is None:
return [self.cls_token_id] + token_ids_a + [self.sep_token_id]
A_ : Tuple = [self.cls_token_id]
A_ : List[str] = [self.sep_token_id]
return cls + token_ids_a + sep + sep + token_ids_a + sep
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ,_a : bool = False ):
'''simple docstring'''
if already_has_special_tokens:
return super().get_special_tokens_mask(
token_ids_a=_a ,token_ids_a=_a ,already_has_special_tokens=_a )
if token_ids_a is None:
return [1] + ([0] * len(_a )) + [1]
return [1] + ([0] * len(_a )) + [1, 1] + ([0] * len(_a )) + [1]
def _a ( self : Any ,_a : List[int] ,_a : Optional[List[int]] = None ):
'''simple docstring'''
A_ : str = [self.sep_token_id]
A_ : List[str] = [self.cls_token_id]
if token_ids_a is None:
return len(cls + token_ids_a + sep ) * [0]
return len(cls + token_ids_a + sep + sep + token_ids_a + sep ) * [0]
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return len(self.sp_model )
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def _a ( self : str ,_a : str ):
'''simple docstring'''
return self.sp_model.encode(_a ,out_type=_a )
def _a ( self : Dict ,_a : List[str] ):
'''simple docstring'''
if token in self.fairseq_tokens_to_ids:
return self.fairseq_tokens_to_ids[token]
A_ : str = self.sp_model.PieceToId(_a )
return spm_id if spm_id else self.unk_token_id
def _a ( self : Optional[int] ,_a : List[Any] ):
'''simple docstring'''
if index in self.fairseq_ids_to_tokens:
return self.fairseq_ids_to_tokens[index]
return self.sp_model.IdToPiece(_a )
def _a ( self : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[Any] = []
A_ : int = """"""
A_ : int = False
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
if not prev_is_special:
out_string += " "
out_string += self.sp_model.decode(_a ) + token
A_ : int = True
A_ : Optional[int] = []
else:
current_sub_tokens.append(_a )
A_ : Optional[Any] = False
out_string += self.sp_model.decode(_a )
return out_string.strip()
def __getstate__( self : List[str] ):
'''simple docstring'''
A_ : str = self.__dict__.copy()
A_ : Any = None
return state
def __setstate__( self : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Optional[int] = {}
A_ : Optional[int] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : List[Any] ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : Dict = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : int = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
| 27 |
'''simple docstring'''
import baseaa
def lowerCamelCase ( lowerCamelCase : str):
    return baseaa.aaaencode(lowerCamelCase.encode("""utf-8"""))
def lowerCamelCase ( lowerCamelCase : bytes):
return baseaa.aaadecode(lowerCamelCase).decode("""utf-8""")
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
| 27 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
    # like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def lowerCamelCase ( lowerCamelCase : str):
# word like '180' or '身高' or '神'
for char in word:
A_ : Optional[Any] = ord(lowerCamelCase)
if not _is_chinese_char(lowerCamelCase):
return 0
return 1
def lowerCamelCase ( lowerCamelCase : List[str]):
A_ : Any = set()
for token in tokens:
A_ : str = len(lowerCamelCase) > 1 and is_chinese(lowerCamelCase)
if chinese_word:
word_set.add(lowerCamelCase)
A_ : Any = list(lowerCamelCase)
return word_list
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : set()):
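    # greedily match the longest whole Chinese word at each position and prefix its continuation pieces with ##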
if not chinese_word_set:
return bert_tokens
A_ : Any = max([len(lowerCamelCase) for w in chinese_word_set])
A_ : str = bert_tokens
A_ , A_ : Any = 0, len(lowerCamelCase)
while start < end:
A_ : Tuple = True
if is_chinese(bert_word[start]):
A_ : List[str] = min(end - start , lowerCamelCase)
for i in range(lowerCamelCase , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : LTP , lowerCamelCase : BertTokenizer):
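    # segment lines with LTP and tokenize with BERT in batches of 100, then record which subword positions continue a whole word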
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
        # We only save the positions of Chinese subwords that start with ##, which means they are part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def lowerCamelCase ( lowerCamelCase : Tuple):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these models, we have to use the same tokenizer: LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
A_ : Optional[Any] = LTP(args.ltp) # faster in GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
__magic_name__ = parser.parse_args()
main(args)
| 27 | 1 |
'''simple docstring'''
import re
from flax.core.frozen_dict import freeze
from flax.traverse_util import flatten_dict, unflatten_dict
from jax.experimental import PartitionSpec as P
# Sentinels
__magic_name__ = object()
# For specifying empty leaf dict `{}`
__magic_name__ = object()
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : List[str]):
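    # return True when the sequence of regexes matches a contiguous window of the flattened parameter key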
A_ : List[Any] = tuple((re.compile(x + """$""") for x in qs))
for i in range(len(lowerCamelCase) - len(lowerCamelCase) + 1):
A_ : List[str] = [x.match(lowerCamelCase) for x, y in zip(lowerCamelCase , ks[i:])]
if matches and all(lowerCamelCase):
return True
return False
def lowerCamelCase ( lowerCamelCase : List[Any]):
def replace(lowerCamelCase : str , lowerCamelCase : List[Any]):
for rule, replacement in rules:
if _match(lowerCamelCase , lowerCamelCase):
return replacement
return val
return replace
def lowerCamelCase ( ):
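    # P("mp", None) shards the first axis over the model-parallel mesh axis; None leaves the parameter fully replicated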
return [
# embeddings
(("transformer", "wpe", "embedding"), P("""mp""" , lowerCamelCase)),
(("transformer", "wte", "embedding"), P("""mp""" , lowerCamelCase)),
        # attention
(("attention", "(q_proj|k_proj|v_proj)", "kernel"), P(lowerCamelCase , """mp""")),
(("attention", "out_proj", "kernel"), P("""mp""" , lowerCamelCase)),
(("attention", "out_proj", "bias"), None),
# mlp
(("mlp", "c_fc", "kernel"), P(lowerCamelCase , """mp""")),
(("mlp", "c_fc", "bias"), P("""mp""")),
(("mlp", "c_proj", "kernel"), P("""mp""" , lowerCamelCase)),
(("mlp", "c_proj", "bias"), None),
# layer norms
((r"ln_\d+", "bias"), None),
((r"\d+", r"ln_\d+", "scale"), None),
(("ln_f", "bias"), None),
(("ln_f", "scale"), None),
]
def lowerCamelCase ( lowerCamelCase : str):
A_ : Dict = _get_partition_rules()
A_ : Optional[int] = _replacement_rules(lowerCamelCase)
A_ : Optional[Any] = {k: _unmatched for k in flatten_dict(lowerCamelCase)}
A_ : str = {k: replace(lowerCamelCase , lowerCamelCase) for k, v in initd.items()}
assert _unmatched not in result.values(), "Incomplete partition spec."
return freeze(unflatten_dict(lowerCamelCase))
| 27 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
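        # tokenize the text and run the image processor, then merge both outputs into a single BatchEncoding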
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import OptionalDependencyNotAvailable, _LazyModule, is_torch_available, is_vision_available
__magic_name__ = {
'configuration_conditional_detr': [
'CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP',
'ConditionalDetrConfig',
'ConditionalDetrOnnxConfig',
]
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_conditional_detr'] = ['ConditionalDetrFeatureExtractor']
    _import_structure['image_processing_conditional_detr'] = ['ConditionalDetrImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_conditional_detr'] = [
'CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConditionalDetrForObjectDetection',
'ConditionalDetrForSegmentation',
'ConditionalDetrModel',
'ConditionalDetrPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_CONFIG_ARCHIVE_MAP,
ConditionalDetrConfig,
ConditionalDetrOnnxConfig,
)
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_conditional_detr import ConditionalDetrFeatureExtractor
from .image_processing_conditional_detr import ConditionalDetrImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_conditional_detr import (
CONDITIONAL_DETR_PRETRAINED_MODEL_ARCHIVE_LIST,
ConditionalDetrForObjectDetection,
ConditionalDetrForSegmentation,
ConditionalDetrModel,
ConditionalDetrPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27 | 1 |
'''simple docstring'''
import base64
def ascii85_encode(plain: str) -> bytes:
    return base64.a85encode(plain.encode("""utf-8"""))
def ascii85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("""utf-8""")
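# A quick round-trip sketch for the helpers above (added; base64.a85encode
# and base64.a85decode are standard-library calls):
def _example_ascii85_round_trip():
    encoded = ascii85_encode("Hello World!")
    assert ascii85_decode(encoded) == "Hello World!"
    return encoded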
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, TaTokenizer
def encode_line(tokenizer, line, max_length, padding_side, pad_to_max_length=True, return_tensors="pt"):
    extra_kw = {"""add_prefix_space""": True} if isinstance(tokenizer, BartTokenizer) and not line.startswith(""" """) else {}
    tokenizer.padding_side = padding_side
    return tokenizer(
        [line], max_length=max_length, padding="""max_length""" if pad_to_max_length else None, truncation=True, return_tensors=return_tensors, add_special_tokens=True, **extra_kw, )
def trim_batch(input_ids, pad_token_id, attention_mask=None, ):
    """Remove columns that are populated exclusively by pad_token_id."""
    keep_column_mask = input_ids.ne(pad_token_id).any(dim=0)
    if attention_mask is None:
        return input_ids[:, keep_column_mask]
    else:
        return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
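# Illustrative behavior of trim_batch (an added sketch, assuming a pad id of
# 0): trailing columns made up entirely of padding are dropped.
def _example_trim_batch():
    ids = torch.tensor([[5, 6, 0, 0], [7, 0, 0, 0]])
    trimmed, mask = trim_batch(ids, 0, attention_mask=ids.ne(0))
    # trimmed.shape == (2, 2): the two all-pad columns were removed.
    return trimmed, mask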
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
    @staticmethod
    def get_char_lens(data_file):
        '''simple docstring'''
        return [len(x) for x in Path(data_file).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
logger = getLogger(__name__)
def flatten_list(summary_ids: List[List]):
    return list(itertools.chain.from_iterable(summary_ids))
def save_git_info(folder_path: str) -> None:
    repo_infos = get_git_info()
    save_json(repo_infos, os.path.join(folder_path, """git_log.json"""))
def save_json(content, path, indent=4, **json_dump_kwargs):
    with open(path, """w""") as f:
        json.dump(content, f, indent=indent, **json_dump_kwargs)
def load_json(path):
    with open(path) as f:
        return json.load(f)
def get_git_info():
    repo = git.Repo(search_parent_directories=True)
    repo_infos = {
        """repo_id""": str(repo),
        """repo_sha""": str(repo.head.object.hexsha),
        """repo_branch""": str(repo.active_branch),
        """hostname""": str(socket.gethostname()),
    }
    return repo_infos
def lmap(f: Callable, x: Iterable) -> List:
    return list(map(f, x))
def pickle_save(obj, path):
    with open(path, """wb""") as f:
        return pickle.dump(obj, f)
def normalize_answer(s):
    def remove_articles(text):
        return re.sub(r"""\b(a|an|the)\b""" , """ """ , text)
    def white_space_fix(text):
        return " ".join(text.split())
    def remove_punc(text):
        exclude = set(string.punctuation)
        return "".join(ch for ch in text if ch not in exclude)
    def lower(text):
        return text.lower()
    return white_space_fix(remove_articles(remove_punc(lower(s))))
def f1_score(prediction, ground_truth):
    prediction_tokens = normalize_answer(prediction).split()
    ground_truth_tokens = normalize_answer(ground_truth).split()
    common = Counter(prediction_tokens) & Counter(ground_truth_tokens)
    num_same = sum(common.values())
    if num_same == 0:
        return 0
    precision = 1.0 * num_same / len(prediction_tokens)
    recall = 1.0 * num_same / len(ground_truth_tokens)
    f1 = (2 * precision * recall) / (precision + recall)
    return f1
def exact_match_score(prediction, ground_truth):
    return normalize_answer(prediction) == normalize_answer(ground_truth)
def calculate_exact_match(output_lns: List[str], reference_lns: List[str]) -> dict:
    assert len(output_lns) == len(reference_lns)
    em = 0
    for hypo, pred in zip(output_lns, reference_lns):
        em += exact_match_score(hypo, pred)
    if len(output_lns) > 0:
        em /= len(output_lns)
    return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 1 |
'''simple docstring'''
import argparse
import re
import numpy as np
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
SamConfig,
SamImageProcessor,
SamModel,
SamProcessor,
SamVisionConfig,
)
KEYS_TO_MODIFY_MAPPING = {
'iou_prediction_head.layers.0': 'iou_prediction_head.proj_in',
'iou_prediction_head.layers.1': 'iou_prediction_head.layers.0',
'iou_prediction_head.layers.2': 'iou_prediction_head.proj_out',
'mask_decoder.output_upscaling.0': 'mask_decoder.upscale_conv1',
'mask_decoder.output_upscaling.1': 'mask_decoder.upscale_layer_norm',
'mask_decoder.output_upscaling.3': 'mask_decoder.upscale_conv2',
'mask_downscaling.0': 'mask_embed.conv1',
'mask_downscaling.1': 'mask_embed.layer_norm1',
'mask_downscaling.3': 'mask_embed.conv2',
'mask_downscaling.4': 'mask_embed.layer_norm2',
'mask_downscaling.6': 'mask_embed.conv3',
'point_embeddings': 'point_embed',
'pe_layer.positional_encoding_gaussian_matrix': 'shared_embedding.positional_embedding',
'image_encoder': 'vision_encoder',
'neck.0': 'neck.conv1',
'neck.1': 'neck.layer_norm1',
'neck.2': 'neck.conv2',
'neck.3': 'neck.layer_norm2',
'patch_embed.proj': 'patch_embed.projection',
'.norm': '.layer_norm',
'blocks': 'layers',
}
def replace_keys(state_dict):
    model_state_dict = {}
    state_dict.pop("""pixel_mean""" , None)
    state_dict.pop("""pixel_std""" , None)
    output_hypernetworks_mlps_pattern = r""".*.output_hypernetworks_mlps.(\d+).layers.(\d+).*"""
    for key, value in state_dict.items():
        for key_to_modify, new_key in KEYS_TO_MODIFY_MAPPING.items():
            if key_to_modify in key:
                key = key.replace(key_to_modify , new_key)
        if re.match(output_hypernetworks_mlps_pattern , key):
            layer_nb = int(re.match(output_hypernetworks_mlps_pattern , key).group(2))
            if layer_nb == 0:
                key = key.replace("""layers.0""" , """proj_in""")
            elif layer_nb == 1:
                key = key.replace("""layers.1""" , """layers.0""")
            elif layer_nb == 2:
                key = key.replace("""layers.2""" , """proj_out""")
        model_state_dict[key] = value
    model_state_dict["""shared_image_embedding.positional_embedding"""] = model_state_dict[
        """prompt_encoder.shared_embedding.positional_embedding"""
    ]
    return model_state_dict
def convert_sam_checkpoint(model_name, pytorch_dump_folder, push_to_hub, model_hub_id="ybelkada/segment-anything"):
    checkpoint_path = hf_hub_download(model_hub_id , F'checkpoints/{model_name}.pth')
    if "sam_vit_b" in model_name:
        config = SamConfig()
    elif "sam_vit_l" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1024 , num_hidden_layers=24 , num_attention_heads=16 , global_attn_indexes=[5, 11, 17, 23] , )
        config = SamConfig(
            vision_config=vision_config , )
    elif "sam_vit_h" in model_name:
        vision_config = SamVisionConfig(
            hidden_size=1280 , num_hidden_layers=32 , num_attention_heads=16 , global_attn_indexes=[7, 15, 23, 31] , )
        config = SamConfig(
            vision_config=vision_config , )
    state_dict = torch.load(checkpoint_path , map_location="""cpu""")
    state_dict = replace_keys(state_dict)
    image_processor = SamImageProcessor()
    processor = SamProcessor(image_processor=image_processor)
    hf_model = SamModel(config)
    hf_model.load_state_dict(state_dict)
    hf_model = hf_model.to("""cuda""")
    img_url = """https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"""
    raw_image = Image.open(requests.get(img_url , stream=True).raw).convert("""RGB""")
    input_points = [[[400, 650]]]
    input_labels = [[1]]
    inputs = processor(images=np.array(raw_image) , return_tensors="""pt""").to("""cuda""")
    with torch.no_grad():
        output = hf_model(**inputs)
    scores = output.iou_scores.squeeze()
    if model_name == "sam_vit_h_4b8939":
        assert scores[-1].item() == 0.579_8902_5115_9668
        inputs = processor(
            images=np.array(raw_image) , input_points=input_points , input_labels=input_labels , return_tensors="""pt""").to("""cuda""")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9712_6030_9219_3604
        input_boxes = ((75, 275, 1725, 850),)
        inputs = processor(images=np.array(raw_image) , input_boxes=input_boxes , return_tensors="""pt""").to("""cuda""")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.8686_0156_0592_6514
        # Test with 2 points and 1 image.
        input_points = [[[400, 650], [800, 650]]]
        input_labels = [[1, 1]]
        inputs = processor(
            images=np.array(raw_image) , input_points=input_points , input_labels=input_labels , return_tensors="""pt""").to("""cuda""")
        with torch.no_grad():
            output = hf_model(**inputs)
        scores = output.iou_scores.squeeze()
        assert scores[-1].item() == 0.9936_0477_9243_4692
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    choices = ['sam_vit_b_01ec64', 'sam_vit_h_4b8939', 'sam_vit_l_0b3195']
parser.add_argument(
'--model_name',
default='sam_vit_h_4b8939',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
parser.add_argument('--pytorch_dump_folder_path', default=None, type=str, help='Path to the output PyTorch model.')
parser.add_argument(
'--push_to_hub',
action='store_true',
help='Whether to push the model and processor to the hub after converting',
)
parser.add_argument(
'--model_hub_id',
default='ybelkada/segment-anything',
choices=choices,
type=str,
help='Path to hf config.json of model to convert',
)
    args = parser.parse_args()
convert_sam_checkpoint(args.model_name, args.pytorch_dump_folder_path, args.push_to_hub, args.model_hub_id)
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
_import_structure = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb'] = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['tokenization_nllb_fast'] = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 | 1 |
'''simple docstring'''
import math
def decimal_to_octal(num: int) -> str:
    octal = 0
    counter = 0
    while num > 0:
        remainder = num % 8
        octal = octal + (remainder * math.floor(math.pow(10 , counter)))
        counter += 1
        num = math.floor(num / 8)  # basically /= 8 without remainder if any
    # This formatting removes trailing '.0' from `octal`.
    return F'0o{int(octal)}'
def main():
print("""\n2 in octal is:""")
print(decimal_to_octal(2)) # = 2
print("""\n8 in octal is:""")
print(decimal_to_octal(8)) # = 10
print("""\n65 in octal is:""")
print(decimal_to_octal(65)) # = 101
print("""\n216 in octal is:""")
print(decimal_to_octal(216)) # = 330
print("""\n512 in octal is:""")
print(decimal_to_octal(512)) # = 1000
print("""\n""")
if __name__ == "__main__":
main()
| 27 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
# Out channels is double in channels because predicts mean and variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
| 27 | 1 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
| 27 | 1 |
'''simple docstring'''
import unittest
from transformers import AutoTokenizer, NystromformerConfig, is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
NystromformerModel,
)
from transformers.models.nystromformer.modeling_nystromformer import NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple ,_a : Tuple ,_a : Optional[Any]=13 ,_a : Optional[int]=7 ,_a : Optional[Any]=True ,_a : Optional[Any]=True ,_a : List[str]=True ,_a : str=True ,_a : Dict=99 ,_a : Optional[int]=32 ,_a : Optional[int]=5 ,_a : int=4 ,_a : Tuple=37 ,_a : int="gelu" ,_a : Tuple=0.1 ,_a : Union[str, Any]=0.1 ,_a : Optional[Any]=512 ,_a : Dict=16 ,_a : List[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Union[str, Any]=3 ,_a : List[str]=4 ,_a : List[Any]=None ,):
'''simple docstring'''
A_ : Optional[int] = parent
A_ : int = batch_size
A_ : List[Any] = seq_length
A_ : Tuple = is_training
A_ : str = use_input_mask
A_ : int = use_token_type_ids
A_ : Dict = use_labels
A_ : List[str] = vocab_size
A_ : List[str] = hidden_size
A_ : Dict = num_hidden_layers
A_ : str = num_attention_heads
A_ : int = intermediate_size
A_ : Optional[Any] = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : str = max_position_embeddings
A_ : List[Any] = type_vocab_size
A_ : List[str] = type_sequence_label_size
A_ : List[Any] = initializer_range
A_ : int = num_labels
A_ : int = num_choices
A_ : int = scope
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : Optional[int] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : Union[str, Any] = None
A_ : Dict = None
A_ : Tuple = None
if self.use_labels:
A_ : Optional[Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Tuple = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = self.get_config()
return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
def _a ( self : List[str] ):
'''simple docstring'''
return NystromformerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
def _a ( self : str ,_a : List[str] ,_a : List[str] ,_a : Optional[Any] ,_a : Optional[Any] ,_a : List[str] ,_a : List[str] ,_a : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = NystromformerModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a ,attention_mask=_a ,token_type_ids=_a )
A_ : Any = model(_a ,token_type_ids=_a )
A_ : Tuple = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Tuple ,_a : List[Any] ,_a : Dict ,_a : Tuple ,_a : Optional[Any] ,_a : Any ,_a : List[Any] ,_a : int ):
'''simple docstring'''
A_ : int = NystromformerForMaskedLM(config=_a )
model.to(_a )
model.eval()
A_ : Tuple = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Optional[Any] ,_a : Dict ,_a : Any ,_a : Optional[int] ,_a : str ,_a : Dict ,_a : List[str] ):
'''simple docstring'''
A_ : List[str] = NystromformerForQuestionAnswering(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(
_a ,attention_mask=_a ,token_type_ids=_a ,start_positions=_a ,end_positions=_a ,)
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _a ( self : str ,_a : Optional[int] ,_a : List[str] ,_a : List[str] ,_a : Any ,_a : Tuple ,_a : Union[str, Any] ,_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = self.num_labels
A_ : Optional[int] = NystromformerForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : List[Any] = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Optional[int] ,_a : List[Any] ,_a : int ,_a : Tuple ,_a : Optional[Any] ,_a : Any ,_a : Union[str, Any] ,_a : Tuple ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : List[Any] = NystromformerForTokenClassification(config=_a )
model.to(_a )
model.eval()
A_ : Dict = model(_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : int ,_a : Tuple ,_a : List[Any] ,_a : Optional[Any] ,_a : Any ,_a : Optional[int] ,_a : Tuple ,_a : Any ):
'''simple docstring'''
A_ : Optional[int] = self.num_choices
A_ : Tuple = NystromformerForMultipleChoice(config=_a )
model.to(_a )
model.eval()
A_ : int = input_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : List[str] = token_type_ids.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Union[str, Any] = input_mask.unsqueeze(1 ).expand(-1 ,self.num_choices ,-1 ).contiguous()
A_ : Optional[Any] = model(
_a ,attention_mask=_a ,token_type_ids=_a ,labels=_a ,)
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _a ( self : List[str] ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            token_type_ids,
            input_mask,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
        return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(
NystromformerModel,
NystromformerForMaskedLM,
NystromformerForMultipleChoice,
NystromformerForQuestionAnswering,
NystromformerForSequenceClassification,
NystromformerForTokenClassification,
)
if is_torch_available()
else ()
)
a_ = (
{
"""feature-extraction""": NystromformerModel,
"""fill-mask""": NystromformerForMaskedLM,
"""question-answering""": NystromformerForQuestionAnswering,
"""text-classification""": NystromformerForSequenceClassification,
"""token-classification""": NystromformerForTokenClassification,
"""zero-shot""": NystromformerForSequenceClassification,
}
if is_torch_available()
else {}
)
a_ = False
a_ = False
def _a ( self : int ):
'''simple docstring'''
A_ : Any = NystromformerModelTester(self )
A_ : Any = ConfigTester(self ,config_class=_a ,hidden_size=37 )
def _a ( self : List[str] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
for type in ["absolute", "relative_key", "relative_key_query"]:
A_ : List[str] = type
self.model_tester.create_and_check_model(*_a )
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
def _a ( self : Dict ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
def _a ( self : Any ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def _a ( self : int ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
@slow
def _a ( self : int ):
'''simple docstring'''
for model_name in NYSTROMFORMER_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[int] = NystromformerModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : int ):
'''simple docstring'''
A_ : Optional[int] = NystromformerModel.from_pretrained("""uw-madison/nystromformer-512""" )
A_ : int = torch.tensor([[0, 1, 2, 3, 4, 5]] )
with torch.no_grad():
A_ : int = model(_a )[0]
A_ : Union[str, Any] = torch.Size((1, 6, 768) )
self.assertEqual(output.shape ,_a )
A_ : Tuple = torch.tensor(
[[[-0.4532, -0.0936, 0.5137], [-0.2676, 0.0628, 0.6186], [-0.3629, -0.1726, 0.4716]]] )
self.assertTrue(torch.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
@slow
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Tuple = """the [MASK] of Belgium is Brussels"""
A_ : List[Any] = AutoTokenizer.from_pretrained("""uw-madison/nystromformer-512""" )
A_ : List[Any] = NystromformerForMaskedLM.from_pretrained("""uw-madison/nystromformer-512""" )
A_ : Tuple = tokenizer(_a ,return_tensors="""pt""" )
with torch.no_grad():
A_ : Union[str, Any] = model(encoding.input_ids ).logits
A_ : str = token_logits[:, 2, :].argmax(-1 )[0]
self.assertEqual(tokenizer.decode(_a ) ,"""capital""" )
| 27 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
    print('Googling.....')
    url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
    res = requests.get(url, headers={'User-Agent': UserAgent().random})
    # res.raise_for_status()
    with open('project1a.html', 'wb') as out_file:  # only for knowing the class
        for data in res.iter_content(10_000):
            out_file.write(data)
    soup = BeautifulSoup(res.text, 'html.parser')
    links = list(soup.select('.eZt8xd'))[:5]
    print(len(links))
    for link in links:
        if link.text == "Maps":
            webbrowser.open(link.get('href'))
        else:
            webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 | 1 |
'''simple docstring'''
import copy
import os
from typing import Union
from ...configuration_utils import PretrainedConfig
from ...models.auto.modeling_auto import MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
from ...utils import logging
from ..auto import CONFIG_MAPPING
logger = logging.get_logger(__name__)
__magic_name__ = {
'salesforce/blip2-opt-2.7b': 'https://huggingface.co/salesforce/blip2-opt-2.7b/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """blip_2_vision_model"""
def __init__( self : List[str] ,_a : List[str]=1408 ,_a : str=6144 ,_a : int=39 ,_a : Tuple=16 ,_a : str=224 ,_a : Optional[Any]=14 ,_a : Tuple="gelu" ,_a : Dict=0.00001 ,_a : str=0.0 ,_a : Optional[Any]=1e-10 ,_a : Tuple=True ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Dict = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Any = num_hidden_layers
A_ : Dict = num_attention_heads
A_ : Union[str, Any] = patch_size
A_ : Optional[int] = image_size
A_ : List[Any] = initializer_range
A_ : Dict = attention_dropout
A_ : List[str] = layer_norm_eps
A_ : Union[str, Any] = hidden_act
A_ : Dict = qkv_bias
@classmethod
def _a ( cls : int ,_a : Union[str, os.PathLike] ,**_a : Any ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : str = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
A_ : Tuple = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """blip_2_qformer"""
def __init__( self : List[str] ,_a : List[Any]=30522 ,_a : Any=768 ,_a : str=12 ,_a : str=12 ,_a : Any=3072 ,_a : Any="gelu" ,_a : Optional[int]=0.1 ,_a : List[str]=0.1 ,_a : Tuple=512 ,_a : List[str]=0.02 ,_a : Union[str, Any]=1e-12 ,_a : Optional[Any]=0 ,_a : Tuple="absolute" ,_a : Union[str, Any]=2 ,_a : Union[str, Any]=1408 ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,**_a )
A_ : List[str] = vocab_size
A_ : Optional[int] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : List[str] = hidden_act
A_ : Optional[Any] = intermediate_size
A_ : Any = hidden_dropout_prob
A_ : Any = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = layer_norm_eps
A_ : Optional[Any] = position_embedding_type
A_ : Optional[int] = cross_attention_frequency
A_ : List[str] = encoder_hidden_size
@classmethod
def _a ( cls : Optional[Any] ,_a : Union[str, os.PathLike] ,**_a : Tuple ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Any = cls.get_config_dict(_a ,**_a )
# get the qformer config dict if we are loading from Blip2Config
if config_dict.get("""model_type""" ) == "blip-2":
A_ : Tuple = config_dict["""qformer_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """blip-2"""
a_ = True
def __init__( self : str ,_a : Union[str, Any]=None ,_a : Any=None ,_a : str=None ,_a : List[str]=32 ,**_a : Union[str, Any] ):
'''simple docstring'''
super().__init__(**_a )
if vision_config is None:
A_ : int = {}
logger.info("""vision_config is None. initializing the Blip2VisionConfig with default values.""" )
if qformer_config is None:
A_ : Tuple = {}
logger.info("""qformer_config is None. Initializing the Blip2QFormerConfig with default values.""" )
if text_config is None:
A_ : str = {}
logger.info("""text_config is None. Initializing the text config with default values (`OPTConfig`).""" )
A_ : Tuple = BlipaVisionConfig(**_a )
A_ : str = BlipaQFormerConfig(**_a )
A_ : List[Any] = text_config["""model_type"""] if """model_type""" in text_config else """opt"""
A_ : Dict = CONFIG_MAPPING[text_model_type](**_a )
A_ : Union[str, Any] = self.text_config.tie_word_embeddings
A_ : int = self.text_config.is_encoder_decoder
A_ : Optional[int] = num_query_tokens
A_ : List[Any] = self.vision_config.hidden_size
A_ : Optional[Any] = self.text_config.model_type in MODEL_FOR_CAUSAL_LM_MAPPING_NAMES
A_ : List[Any] = 1.0
A_ : Optional[int] = 0.02
@classmethod
def _a ( cls : List[Any] ,_a : BlipaVisionConfig ,_a : BlipaQFormerConfig ,_a : PretrainedConfig ,**_a : Tuple ,):
'''simple docstring'''
return cls(
vision_config=vision_config.to_dict() ,qformer_config=qformer_config.to_dict() ,text_config=text_config.to_dict() ,**_a ,)
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = copy.deepcopy(self.__dict__ )
A_ : Optional[int] = self.vision_config.to_dict()
A_ : Optional[Any] = self.qformer_config.to_dict()
A_ : str = self.text_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
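# An added composition sketch. It assumes the classes above correspond to
# transformers' Blip2VisionConfig, Blip2QFormerConfig, and Blip2Config; the
# import below relies on that assumption rather than on this file's aliases.
def _example_compose_blip2_config():
    from transformers import Blip2Config, Blip2QFormerConfig, Blip2VisionConfig, OPTConfig
    vision = Blip2VisionConfig(hidden_size=1408)
    qformer = Blip2QFormerConfig(encoder_hidden_size=1408)
    # The composite config wires the vision encoder, the Q-Former bridge, and
    # an OPT language model together; num_query_tokens sets how many learned
    # query embeddings the Q-Former feeds into the language model.
    return Blip2Config.from_vision_qformer_text_configs(vision, qformer, OPTConfig(), num_query_tokens=32)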
| 27 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
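# An added instantiation sketch, assuming the class above corresponds to
# transformers' NezhaConfig (shipped in transformers >= 4.20):
def _example_nezha_config():
    from transformers import NezhaConfig
    # max_relative_position drives NEZHA's functional relative position
    # encoding, the main difference from a vanilla BERT-style config.
    return NezhaConfig(hidden_size=768, num_hidden_layers=12, max_relative_position=64)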
| 27 | 1 |
'''simple docstring'''
import argparse
import datetime
def zeller(date_input: str) -> str:
    days = {
        """0""": """Sunday""",
        """1""": """Monday""",
        """2""": """Tuesday""",
        """3""": """Wednesday""",
        """4""": """Thursday""",
        """5""": """Friday""",
        """6""": """Saturday""",
    }
    convert_datetime_days = {0: 1, 1: 2, 2: 3, 3: 4, 4: 5, 5: 6, 6: 0}
    # Validate
    if not 0 < len(date_input) < 11:
        raise ValueError("""Must be 10 characters long""")
    # Get month
    m = int(date_input[0] + date_input[1])
    # Validate
    if not 0 < m < 13:
        raise ValueError("""Month must be between 1 - 12""")
    sep_1 = date_input[2]
    # Validate
    if sep_1 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""")
    # Get day
    d = int(date_input[3] + date_input[4])
    # Validate
    if not 0 < d < 32:
        raise ValueError("""Date must be between 1 - 31""")
    # Get second separator
    sep_2 = date_input[5]
    # Validate
    if sep_2 not in ["-", "/"]:
        raise ValueError("""Date separator must be '-' or '/'""")
    # Get year
    y = int(date_input[6] + date_input[7] + date_input[8] + date_input[9])
    # Arbitrary year range
    if not 45 < y < 8500:
        raise ValueError(
            """Year out of range. There has to be some sort of limit...right?""")
    # Get datetime obj for validation
    dt_ck = datetime.date(int(y) , int(m) , int(d))
    # Start math
    if m <= 2:
        y = y - 1
        m = m + 12
    # maths var
    c = int(str(y)[:2])
    k = int(str(y)[2:])
    t = int(2.6 * m - 5.39)
    u = int(c / 4)
    v = int(k / 4)
    x = int(d + k)
    z = int(t + u + v + x)
    w = int(z - (2 * c))
    f = round(w % 7)
    # End math
    # Validate math
    if f != convert_datetime_days[dt_ck.weekday()]:
        raise AssertionError("""The date was evaluated incorrectly. Contact developer.""")
    # Response
    response = F'Your date {date_input}, is a {days[str(f)]}!'
    return response
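# A quick usage sketch (added): 2010-01-31 fell on a Sunday.
def _example_zeller():
    return zeller('01-31-2010')  # -> "Your date 01-31-2010, is a Sunday!"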
if __name__ == "__main__":
import doctest
doctest.testmod()
    parser = argparse.ArgumentParser(
description=(
'Find out what day of the week nearly any date is or was. Enter '
'date as a string in the mm-dd-yyyy or mm/dd/yyyy format'
)
)
parser.add_argument(
'date_input', type=str, help='Date as a string (mm-dd-yyyy or mm/dd/yyyy)'
)
    args = parser.parse_args()
zeller(args.date_input)
| 27 |
'''simple docstring'''
from __future__ import annotations
def lowerCamelCase ( lowerCamelCase : dict , lowerCamelCase : str):
A_ , A_ : List[Any] = set(lowerCamelCase), [start]
while stack:
A_ : Optional[Any] = stack.pop()
explored.add(lowerCamelCase)
# Differences from BFS:
# 1) pop last element instead of first one
# 2) add adjacent elements to stack without exploring them
for adj in reversed(graph[v]):
if adj not in explored:
stack.append(lowerCamelCase)
return explored
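# Example adjacency list for the demo below (note the self-loop at 'D')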
__magic_name__ = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 1 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'openai/imagegpt-small': '',
'openai/imagegpt-medium': '',
'openai/imagegpt-large': '',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """imagegpt"""
a_ = ["""past_key_values"""]
a_ = {
"""hidden_size""": """n_embd""",
"""max_position_embeddings""": """n_positions""",
"""num_attention_heads""": """n_head""",
"""num_hidden_layers""": """n_layer""",
}
def __init__( self : str ,_a : int=512 + 1 ,_a : List[str]=32 * 32 ,_a : int=512 ,_a : Optional[int]=24 ,_a : List[str]=8 ,_a : Dict=None ,_a : Optional[Any]="quick_gelu" ,_a : str=0.1 ,_a : Optional[Any]=0.1 ,_a : List[str]=0.1 ,_a : str=1e-5 ,_a : Union[str, Any]=0.02 ,_a : int=True ,_a : Optional[int]=True ,_a : Optional[Any]=False ,_a : List[Any]=False ,_a : str=False ,**_a : Tuple ,):
'''simple docstring'''
A_ : Optional[int] = vocab_size
A_ : List[Any] = n_positions
A_ : Any = n_embd
A_ : List[Any] = n_layer
A_ : Optional[int] = n_head
A_ : Union[str, Any] = n_inner
A_ : Tuple = activation_function
A_ : Union[str, Any] = resid_pdrop
A_ : Union[str, Any] = embd_pdrop
A_ : Tuple = attn_pdrop
A_ : List[Any] = layer_norm_epsilon
A_ : Union[str, Any] = initializer_range
A_ : Any = scale_attn_weights
A_ : int = use_cache
A_ : Tuple = scale_attn_by_inverse_layer_idx
A_ : Optional[Any] = reorder_and_upcast_attn
A_ : Dict = tie_word_embeddings
super().__init__(tie_word_embeddings=_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : List[str] ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
] )
def _a ( self : int ,_a : "FeatureExtractionMixin" ,_a : int = 1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 32 ,_a : int = 32 ,):
'''simple docstring'''
A_ : Any = self._generate_dummy_images(_a ,_a ,_a ,_a )
A_ : List[Any] = dict(preprocessor(images=_a ,return_tensors=_a ) )
return inputs
| 27 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
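            # the fused qkv projection is split into separate query, key and value chunks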
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
__magic_name__ = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 27 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : list[int] , lowerCamelCase : str):
A_ : str = int(lowerCamelCase)
# Initialize Result
A_ : Dict = []
# Traverse through all denomination
for denomination in reversed(lowerCamelCase):
# Find denominations
while int(lowerCamelCase) >= int(lowerCamelCase):
total_value -= int(lowerCamelCase)
answer.append(lowerCamelCase) # Append the "answers" array
return answer
# Driver Code
if __name__ == "__main__":
__magic_name__ = []
__magic_name__ = '0'
if (
    input('Do you want to enter your denominations ? (y/n): ').strip().lower()
== "y"
):
__magic_name__ = int(input('Enter the number of denominations you want to add: ').strip())
for i in range(0, n):
denominations.append(int(input(f"""Denomination {i}: """).strip()))
__magic_name__ = input('Enter the change you want to make in Indian Currency: ').strip()
else:
# All denominations of Indian Currency if user does not enter
__magic_name__ = [1, 2, 5, 10, 20, 50, 100, 500, 2_000]
__magic_name__ = input('Enter the change you want to make: ').strip()
if int(value) == 0 or int(value) < 0:
print('The total value cannot be zero or negative.')
else:
print(f"""Following is minimal change for {value}: """)
__magic_name__ = find_minimum_change(denominations, value)
# Print result
for i in range(len(answer)):
print(answer[i], end=' ')
| 27 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
import unittest
from transformers import FunnelConfig, is_tf_available
from transformers.testing_utils import require_tf
from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor, random_attention_mask
from ...test_pipeline_mixin import PipelineTesterMixin
if is_tf_available():
import tensorflow as tf
from transformers import (
TFFunnelBaseModel,
TFFunnelForMaskedLM,
TFFunnelForMultipleChoice,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForSequenceClassification,
TFFunnelForTokenClassification,
TFFunnelModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : Union[str, Any] ,_a : List[Any]=13 ,_a : Any=7 ,_a : Tuple=True ,_a : Tuple=True ,_a : List[Any]=True ,_a : List[str]=True ,_a : int=99 ,_a : Any=[1, 1, 2] ,_a : Union[str, Any]=1 ,_a : Optional[Any]=32 ,_a : Optional[int]=4 ,_a : List[Any]=8 ,_a : List[str]=37 ,_a : Optional[int]="gelu_new" ,_a : Any=0.1 ,_a : Optional[Any]=0.1 ,_a : List[Any]=0.0 ,_a : List[str]=512 ,_a : List[Any]=3 ,_a : Any=0.02 ,_a : List[Any]=3 ,_a : Optional[Any]=4 ,_a : List[str]=None ,_a : List[Any]=False ,):
'''simple docstring'''
A_ : Any = parent
A_ : List[Any] = batch_size
A_ : Union[str, Any] = seq_length
A_ : List[Any] = is_training
A_ : List[Any] = use_input_mask
A_ : Tuple = use_token_type_ids
A_ : Any = use_labels
A_ : str = vocab_size
A_ : int = block_sizes
A_ : Dict = num_decoder_layers
A_ : Dict = d_model
A_ : Tuple = n_head
A_ : Tuple = d_head
A_ : str = d_inner
A_ : Dict = hidden_act
A_ : List[Any] = hidden_dropout
A_ : Union[str, Any] = attention_dropout
A_ : int = activation_dropout
A_ : Any = max_position_embeddings
A_ : Any = type_vocab_size
A_ : Optional[Any] = 2
A_ : Dict = num_labels
A_ : str = num_choices
A_ : int = scope
A_ : List[Any] = initializer_std
# Used in the tests to check the size of the first attention layer
A_ : Optional[int] = n_head
# Used in the tests to check the size of the first hidden state
A_ : List[str] = self.d_model
# Used in the tests to check the number of output hidden states/attentions
A_ : List[Any] = sum(self.block_sizes ) + (0 if base else self.num_decoder_layers)
# FunnelModel adds two hidden layers: input embeddings and the sum of the upsampled encoder hidden state with
# the last hidden state of the first block (which is the first hidden state of the decoder).
if not base:
A_ : int = self.num_hidden_layers + 2
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : int = None
if self.use_input_mask:
A_ : Union[str, Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : Any = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : Optional[Any] = None
A_ : Union[str, Any] = None
A_ : Optional[int] = None
if self.use_labels:
A_ : Union[str, Any] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : int = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = FunnelConfig(
vocab_size=self.vocab_size ,block_sizes=self.block_sizes ,num_decoder_layers=self.num_decoder_layers ,d_model=self.d_model ,n_head=self.n_head ,d_head=self.d_head ,d_inner=self.d_inner ,hidden_act=self.hidden_act ,hidden_dropout=self.hidden_dropout ,attention_dropout=self.attention_dropout ,activation_dropout=self.activation_dropout ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,initializer_std=self.initializer_std ,)
return (
config,
input_ids,
token_type_ids,
input_mask,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Dict ,_a : Optional[int] ,_a : List[Any] ,_a : int ,_a : Optional[int] ,_a : List[str] ,_a : List[str] ,_a : List[Any] ,):
'''simple docstring'''
A_ : Tuple = TFFunnelModel(config=_a )
A_ : Tuple = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : int = model(_a )
A_ : Tuple = [input_ids, input_mask]
A_ : Dict = model(_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
A_ : Optional[Any] = False
A_ : Tuple = TFFunnelModel(config=_a )
A_ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
A_ : int = False
A_ : List[Any] = TFFunnelModel(config=_a )
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.d_model) )
def _a ( self : int ,_a : Any ,_a : Any ,_a : List[str] ,_a : Tuple ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Optional[Any] ,):
'''simple docstring'''
A_ : Dict = TFFunnelBaseModel(config=_a )
A_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : List[Any] = model(_a )
A_ : int = [input_ids, input_mask]
A_ : Any = model(_a )
A_ : List[str] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
A_ : Dict = False
A_ : Union[str, Any] = TFFunnelBaseModel(config=_a )
A_ : Optional[Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 3, self.d_model) )
A_ : List[Any] = False
A_ : Any = TFFunnelBaseModel(config=_a )
A_ : Union[str, Any] = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, 2, self.d_model) )
def _a ( self : Tuple ,_a : Optional[Any] ,_a : List[str] ,_a : List[str] ,_a : Optional[Any] ,_a : List[Any] ,_a : Optional[int] ,_a : List[str] ,):
'''simple docstring'''
A_ : int = TFFunnelForPreTraining(config=_a )
A_ : List[Any] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : Optional[int] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length) )
def _a ( self : str ,_a : int ,_a : List[str] ,_a : List[str] ,_a : List[str] ,_a : Optional[int] ,_a : Optional[Any] ,_a : Union[str, Any] ,):
'''simple docstring'''
A_ : Optional[Any] = TFFunnelForMaskedLM(config=_a )
A_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : int = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[Any] ,_a : Tuple ,_a : str ,_a : List[str] ,_a : Optional[Any] ,_a : int ,_a : Optional[int] ,_a : Tuple ,):
'''simple docstring'''
A_ : Union[str, Any] = self.num_labels
A_ : Any = TFFunnelForSequenceClassification(config=_a )
A_ : List[str] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : List[str] = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ,_a : Optional[Any] ,_a : Optional[Any] ,_a : List[str] ,_a : Tuple ,_a : Tuple ,_a : List[str] ,_a : Any ,):
'''simple docstring'''
A_ : Union[str, Any] = self.num_choices
A_ : Union[str, Any] = TFFunnelForMultipleChoice(config=_a )
A_ : Union[str, Any] = tf.tile(tf.expand_dims(_a ,1 ) ,(1, self.num_choices, 1) )
A_ : int = tf.tile(tf.expand_dims(_a ,1 ) ,(1, self.num_choices, 1) )
A_ : List[Any] = tf.tile(tf.expand_dims(_a ,1 ) ,(1, self.num_choices, 1) )
A_ : Tuple = {
"""input_ids""": multiple_choice_inputs_ids,
"""attention_mask""": multiple_choice_input_mask,
"""token_type_ids""": multiple_choice_token_type_ids,
}
A_ : Tuple = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_choices) )
def _a ( self : Tuple ,_a : Dict ,_a : Optional[int] ,_a : str ,_a : List[Any] ,_a : Optional[int] ,_a : Tuple ,_a : Tuple ,):
'''simple docstring'''
A_ : Tuple = self.num_labels
A_ : Union[str, Any] = TFFunnelForTokenClassification(config=_a )
A_ : str = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : int = model(_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.num_labels) )
def _a ( self : Any ,_a : Optional[Any] ,_a : int ,_a : Union[str, Any] ,_a : Dict ,_a : Tuple ,_a : Union[str, Any] ,_a : Tuple ,):
'''simple docstring'''
A_ : Optional[Any] = TFFunnelForQuestionAnswering(config=_a )
A_ : Optional[int] = {"""input_ids""": input_ids, """attention_mask""": input_mask, """token_type_ids""": token_type_ids}
A_ : List[Any] = model(_a )
self.parent.assertEqual(result.start_logits.shape ,(self.batch_size, self.seq_length) )
self.parent.assertEqual(result.end_logits.shape ,(self.batch_size, self.seq_length) )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Any = self.prepare_config_and_inputs()
        A_ , A_ , A_ , A_ , A_ , A_ , A_ : List[str] = config_and_inputs
A_ : str = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": input_mask}
return config, inputs_dict
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(
TFFunnelModel,
TFFunnelForMaskedLM,
TFFunnelForPreTraining,
TFFunnelForQuestionAnswering,
TFFunnelForTokenClassification,
)
if is_tf_available()
else ()
)
a_ = (
{
"""feature-extraction""": (TFFunnelBaseModel, TFFunnelModel),
"""fill-mask""": TFFunnelForMaskedLM,
"""question-answering""": TFFunnelForQuestionAnswering,
"""text-classification""": TFFunnelForSequenceClassification,
"""token-classification""": TFFunnelForTokenClassification,
"""zero-shot""": TFFunnelForSequenceClassification,
}
if is_tf_available()
else {}
)
a_ = False
a_ = False
def _a ( self : Any ):
'''simple docstring'''
A_ : int = TFFunnelModelTester(self )
A_ : List[Any] = ConfigTester(self ,config_class=_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : int ):
'''simple docstring'''
A_ : Tuple = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Any = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_masked_lm(*_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_token_classification(*_a )
def _a ( self : int ):
'''simple docstring'''
A_ : List[str] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_question_answering(*_a )
@require_tf
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(TFFunnelBaseModel, TFFunnelForMultipleChoice, TFFunnelForSequenceClassification) if is_tf_available() else ()
)
a_ = False
a_ = False
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Dict = TFFunnelModelTester(self ,base=_a )
A_ : List[Any] = ConfigTester(self ,config_class=_a )
def _a ( self : str ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_base_model(*_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_sequence_classification(*_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_multiple_choice(*_a )
| 27 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,*_a : Optional[Any] ,**_a : Optional[int] ):
'''simple docstring'''
warnings.warn(
"""The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
""" use OwlViTImageProcessor instead.""" ,_a ,)
super().__init__(*_a ,**_a )
| 27 | 1 |
'''simple docstring'''
import importlib
import os
import fsspec
import pytest
from fsspec import register_implementation
from fsspec.registry import _registry as _fsspec_registry
from datasets.filesystems import COMPRESSION_FILESYSTEMS, HfFileSystem, extract_path_from_uri, is_remote_filesystem
from .utils import require_lza, require_zstandard
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
assert "mock" in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase ( ):
assert "mock" not in _fsspec_registry
assert "bz2" in _fsspec_registry
def lowerCamelCase ( ):
A_ : Dict = """mock-s3-bucket"""
A_ : str = F's3://{mock_bucket}'
A_ : List[Any] = extract_path_from_uri(lowerCamelCase)
assert dataset_path.startswith("""s3://""") is False
A_ : List[str] = """./local/path"""
A_ : Any = extract_path_from_uri(lowerCamelCase)
assert dataset_path == new_dataset_path
def lowerCamelCase ( lowerCamelCase : int):
A_ : Union[str, Any] = is_remote_filesystem(lowerCamelCase)
assert is_remote is True
A_ : Optional[Any] = fsspec.filesystem("""file""")
A_ : Optional[int] = is_remote_filesystem(lowerCamelCase)
assert is_remote is False
@pytest.mark.parametrize("""compression_fs_class""" , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : List[str] , lowerCamelCase : Any):
A_ : Any = {"""gzip""": gz_file, """xz""": xz_file, """zstd""": zstd_file, """bz2""": bza_file, """lz4""": lza_file}
A_ : Dict = input_paths[compression_fs_class.protocol]
if input_path is None:
A_ : Optional[int] = F'for \'{compression_fs_class.protocol}\' compression protocol, '
if compression_fs_class.protocol == "lz4":
reason += require_lza.kwargs["reason"]
elif compression_fs_class.protocol == "zstd":
reason += require_zstandard.kwargs["reason"]
pytest.skip(lowerCamelCase)
A_ : List[Any] = fsspec.filesystem(compression_fs_class.protocol , fo=lowerCamelCase)
assert isinstance(lowerCamelCase , lowerCamelCase)
A_ : List[Any] = os.path.basename(lowerCamelCase)
A_ : Optional[int] = expected_filename[: expected_filename.rindex(""".""")]
assert fs.glob("""*""") == [expected_filename]
with fs.open(lowerCamelCase , """r""" , encoding="""utf-8""") as f, open(lowerCamelCase , encoding="""utf-8""") as expected_file:
assert f.read() == expected_file.read()
@pytest.mark.parametrize("""protocol""" , ["""zip""", """gzip"""])
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Dict , lowerCamelCase : Optional[Any]):
A_ : Union[str, Any] = {"""zip""": zip_jsonl_path, """gzip""": jsonl_gz_path}
A_ : Any = compressed_file_paths[protocol]
A_ : List[Any] = """dataset.jsonl"""
A_ : List[str] = F'{protocol}://{member_file_path}::{compressed_file_path}'
A_ , *A_ : int = fsspec.get_fs_token_paths(lowerCamelCase)
assert fs.isfile(lowerCamelCase)
assert not fs.isfile("""non_existing_""" + member_file_path)
@pytest.mark.integration
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : Tuple , lowerCamelCase : List[Any]):
A_ : Optional[Any] = hf_api.dataset_info(lowerCamelCase , token=lowerCamelCase)
A_ : Optional[int] = HfFileSystem(repo_info=lowerCamelCase , token=lowerCamelCase)
assert sorted(hffs.glob("""*""")) == [".gitattributes", "data"]
assert hffs.isdir("""data""")
assert hffs.isfile(""".gitattributes""") and hffs.isfile("""data/text_data.txt""")
with open(lowerCamelCase) as f:
assert hffs.open("""data/text_data.txt""" , """r""").read() == f.read()
def lowerCamelCase ( ):
A_ : Union[str, Any] = """bz2"""
# Import module
import datasets.filesystems
# Overwrite protocol and reload
register_implementation(lowerCamelCase , lowerCamelCase , clobber=lowerCamelCase)
with pytest.warns(lowerCamelCase) as warning_info:
importlib.reload(datasets.filesystems)
assert len(lowerCamelCase) == 1
assert (
str(warning_info[0].message)
== F'A filesystem protocol was already set for {protocol} and will be overwritten.'
)
| 27 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def lowerCamelCase ( lowerCamelCase : str , lowerCamelCase : complex , lowerCamelCase : str = "x" , lowerCamelCase : float = 10**-10 , lowerCamelCase : int = 1 , ):
A_ : int = symbols(lowerCamelCase)
A_ : List[Any] = lambdify(lowerCamelCase , lowerCamelCase)
A_ : List[str] = lambdify(lowerCamelCase , diff(lowerCamelCase , lowerCamelCase))
A_ : str = starting_point
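    # Newton's update for a root of multiplicity m: x_{n+1} = x_n - m * f(x_n) / f'(x_n)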
while True:
if diff_function(lowerCamelCase) != 0:
A_ : int = prev_guess - multiplicity * func(lowerCamelCase) / diff_function(
lowerCamelCase)
else:
raise ZeroDivisionError("""Could not find root""") from None
# Precision is checked by comparing the difference of consecutive guesses
if abs(next_guess - prev_guess) < precision:
return next_guess
A_ : Union[str, Any] = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers.testing_utils import require_torch, require_vision
from transformers.utils import is_torch_available, is_vision_available
from ...test_image_processing_common import ImageProcessingSavingTestMixin, prepare_image_inputs
if is_torch_available():
import torch
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict ,_a : Optional[Any] ,_a : Optional[Any]=13 ,_a : Any=3 ,_a : Optional[int]=224 ,_a : Optional[int]=30 ,_a : List[str]=400 ,_a : Dict=True ,_a : int=None ,_a : Union[str, Any]=True ,_a : Optional[Any]=[0.5, 0.5, 0.5] ,_a : int=[0.5, 0.5, 0.5] ,):
'''simple docstring'''
A_ : int = size if size is not None else {"""height""": 18, """width""": 18}
A_ : str = parent
A_ : int = batch_size
A_ : str = num_channels
A_ : Optional[int] = image_size
A_ : List[Any] = min_resolution
A_ : Dict = max_resolution
A_ : str = do_resize
A_ : Union[str, Any] = size
A_ : Optional[int] = do_normalize
A_ : Any = image_mean
A_ : Any = image_std
def _a ( self : str ):
'''simple docstring'''
return {
"image_mean": self.image_mean,
"image_std": self.image_std,
"do_normalize": self.do_normalize,
"do_resize": self.do_resize,
"size": self.size,
}
@require_torch
@require_vision
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = ViTImageProcessor if is_vision_available() else None
def _a ( self : Any ):
'''simple docstring'''
A_ : Union[str, Any] = EfficientFormerImageProcessorTester(self )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return self.image_proc_tester.prepare_image_processor_dict()
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = self.image_processing_class(**self.image_processor_dict )
self.assertTrue(hasattr(_a ,"""image_mean""" ) )
self.assertTrue(hasattr(_a ,"""image_std""" ) )
self.assertTrue(hasattr(_a ,"""do_normalize""" ) )
self.assertTrue(hasattr(_a ,"""do_resize""" ) )
self.assertTrue(hasattr(_a ,"""size""" ) )
def _a ( self : Any ):
'''simple docstring'''
pass
def _a ( self : str ):
'''simple docstring'''
A_ : str = self.image_processing_class(**self.image_processor_dict )
# create random PIL images
A_ : int = prepare_image_inputs(self.image_proc_tester ,equal_resolution=_a )
for image in image_inputs:
self.assertIsInstance(_a ,Image.Image )
# Test not batched input
A_ : Optional[int] = image_processor(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
# Test batched
A_ : Dict = image_processor(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
def _a ( self : str ):
'''simple docstring'''
A_ : Union[str, Any] = self.image_processing_class(**self.image_processor_dict )
# create random numpy tensors
A_ : List[str] = prepare_image_inputs(self.image_proc_tester ,equal_resolution=_a ,numpify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,np.ndarray )
# Test not batched input
A_ : Dict = image_processor(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
# Test batched
A_ : Optional[Any] = image_processor(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : List[Any] = self.image_processing_class(**self.image_processor_dict )
# create random PyTorch tensors
A_ : Optional[Any] = prepare_image_inputs(self.image_proc_tester ,equal_resolution=_a ,torchify=_a )
for image in image_inputs:
self.assertIsInstance(_a ,torch.Tensor )
# Test not batched input
A_ : Optional[Any] = image_processor(image_inputs[0] ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
1,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
# Test batched
A_ : List[str] = image_processor(_a ,return_tensors="""pt""" ).pixel_values
self.assertEqual(
encoded_images.shape ,(
self.image_proc_tester.batch_size,
self.image_proc_tester.num_channels,
self.image_proc_tester.size["""height"""],
self.image_proc_tester.size["""width"""],
) ,)
| 27 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
__magic_name__ = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
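# Maps args.num_image_embeds to the (height, width) grid of the adaptive average pool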
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Dict ,_a : Dict ):
'''simple docstring'''
super().__init__()
A_ : List[str] = torchvision.models.resnetaaa(pretrained=_a )
A_ : int = list(model.children() )[:-2]
A_ : int = nn.Sequential(*_a )
A_ : Optional[int] = nn.AdaptiveAvgPoolad(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : str ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.pool(self.model(_a ) )
A_ : Any = torch.flatten(_a ,start_dim=2 )
A_ : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = [json.loads(_a ) for l in open(_a )]
A_ : Optional[int] = os.path.dirname(_a )
A_ : Optional[Any] = tokenizer
A_ : Optional[Any] = labels
A_ : List[Any] = len(_a )
A_ : str = max_seq_length
A_ : str = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
A_ : Optional[int] = sentence[: self.max_seq_length]
A_ : Any = torch.zeros(self.n_classes )
A_ : Tuple = 1
A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
A_ : Union[str, Any] = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
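    # pad every sentence in the batch to the longest one; mask_tensor is meant to mark the real (non-padded) tokens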
A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
A_ : str = input_row["""sentence"""]
A_ : Tuple = 1
A_ : int = torch.stack([row["""image"""] for row in batch])
A_ : str = torch.stack([row["""label"""] for row in batch])
A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
| 27 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : list[int]):
A_ : str = []
if len(lowerCamelCase) == 1:
return [nums.copy()]
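    # take each element in turn as the head and recursively permute the rest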
for _ in range(len(lowerCamelCase)):
A_ : Optional[Any] = nums.pop(0)
A_ : Tuple = permute(lowerCamelCase)
for perm in permutations:
perm.append(lowerCamelCase)
result.extend(lowerCamelCase)
nums.append(lowerCamelCase)
return result
def lowerCamelCase ( lowerCamelCase : Dict):
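    # swap-based backtracking: fix one position at a time and recurse on the remainder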
def backtrack(lowerCamelCase : Tuple):
if start == len(lowerCamelCase) - 1:
output.append(nums[:])
else:
for i in range(lowerCamelCase , len(lowerCamelCase)):
A_ , A_ : Any = nums[i], nums[start]
backtrack(start + 1)
A_ , A_ : List[str] = nums[i], nums[start] # backtrack
A_ : List[str] = []
backtrack(0)
return output
if __name__ == "__main__":
import doctest
# use res to print the data in permute2 function
__magic_name__ = permutea([1, 2, 3])
print(res)
doctest.testmod()
| 27 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCamelCase ( lowerCamelCase : int):
if num <= 0:
A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(lowerCamelCase)
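    # sieve[i] stays True while i is still considered prime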
A_ : str = [True] * (num + 1)
A_ : Tuple = []
A_ : str = 2
A_ : Any = int(math.sqrt(lowerCamelCase))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCamelCase)
# Set multiples of start be False
for i in range(start * start , num + 1 , lowerCamelCase):
if sieve[i] is True:
A_ : Union[str, Any] = False
start += 1
for j in range(end + 1 , num + 1):
if sieve[j] is True:
prime.append(lowerCamelCase)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def lowerCamelCase ( lowerCamelCase : int):
if num <= 0:
A_ : List[Any] = F'{num}: Invalid input, please enter a positive integer.'
raise ValueError(lowerCamelCase)
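    # sieve[i] stays True while i is still considered prime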
A_ : str = [True] * (num + 1)
A_ : Tuple = []
A_ : str = 2
A_ : Any = int(math.sqrt(lowerCamelCase))
while start <= end:
# If start is a prime
if sieve[start] is True:
prime.append(lowerCamelCase)
# Set multiples of start be False
for i in range(start * start , num + 1 , lowerCamelCase):
if sieve[i] is True:
A_ : Union[str, Any] = False
start += 1
for j in range(end + 1 , num + 1):
if sieve[j] is True:
prime.append(lowerCamelCase)
return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
| 27 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
__magic_name__ = trt.Logger(trt.Logger.WARNING)
__magic_name__ = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
__magic_name__ = logging.getLogger(__name__)
__magic_name__ = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
    help='Path to the ONNX model.',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
    '--preprocessing_num_workers', type=int, default=4, help='Number of workers to use for preprocessing the dataset.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
__magic_name__ = parser.parse_args()
if args.tokenizer_name:
__magic_name__ = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
__magic_name__ = args.per_device_eval_batch_size
__magic_name__ = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
__magic_name__ = True
__magic_name__ = 'temp_engine/bert-fp32.engine'
if args.fpaa:
__magic_name__ = 'temp_engine/bert-fp16.engine'
if args.inta:
__magic_name__ = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
__magic_name__ = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
__magic_name__ = [network.get_input(i) for i in range(network.num_inputs)]
__magic_name__ = [_input.name for _input in network_inputs] # ex: ["actual_input1"]
with builder.create_builder_config() as config:
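        # workspace memory budget used while building the engine (1 << 50 bytes)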
__magic_name__ = 1 << 50
if STRICT_TYPES:
config.set_flag(trt.BuilderFlag.STRICT_TYPES)
if args.fpaa:
config.set_flag(trt.BuilderFlag.FPaa)
if args.inta:
config.set_flag(trt.BuilderFlag.INTa)
__magic_name__ = builder.create_optimization_profile()
config.add_optimization_profile(profile)
for i in range(len(input_names)):
profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
__magic_name__ = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Union[str, Any] , lowerCamelCase : str , lowerCamelCase : str , lowerCamelCase : Any , lowerCamelCase : List[Any] , lowerCamelCase : str , lowerCamelCase : List[str]):
A_ : str = np.asarray(inputs["""input_ids"""] , dtype=np.intaa)
A_ : int = np.asarray(inputs["""attention_mask"""] , dtype=np.intaa)
A_ : Optional[int] = np.asarray(inputs["""token_type_ids"""] , dtype=np.intaa)
# Copy inputs
cuda.memcpy_htod_async(d_inputs[0] , input_ids.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[1] , attention_mask.ravel() , lowerCamelCase)
cuda.memcpy_htod_async(d_inputs[2] , token_type_ids.ravel() , lowerCamelCase)
# start time
A_ : List[Any] = time.time()
# Run inference
context.execute_async(
bindings=[int(lowerCamelCase) for d_inp in d_inputs] + [int(lowerCamelCase), int(lowerCamelCase)] , stream_handle=stream.handle)
# Transfer predictions back from GPU
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
cuda.memcpy_dtoh_async(lowerCamelCase , lowerCamelCase , lowerCamelCase)
# Synchronize the stream and take time
stream.synchronize()
# end time
A_ : str = time.time()
A_ : Tuple = end_time - start_time
A_ : Any = (h_outputa, h_outputa)
# print(outputs)
return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
    datasets.utils.logging.set_verbosity_warning()
    transformers.utils.logging.set_verbosity_info()
else:
    datasets.utils.logging.set_verbosity_error()
    transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
    set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
    # Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
    raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slightly different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
    logger.warning(
        f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the """
        f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
    )
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lot of space). So we remove that
    # left whitespace.
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possibly giving several features when a context is long, each of those features having a
    # context that overlaps a bit with the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
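# Editor note (illustrative numbers, not this script's defaults): with max_seq_length=384 and
# doc_stride=128, a 1000-token context yields overlapping features whose windows advance by
# roughly 256 context tokens each, and overflow_to_sample_mapping points every such feature
# back to the single original example.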
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
    # setup for TRT inference
    for i in range(len(input_names)):
        context.set_binding_shape(i, INPUT_SHAPE)
    assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
    # Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffers
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
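    # Editor note: cuda.pagelocked_empty allocates pinned host memory; memcpy_dtoh_async is only
    # truly asynchronous when the host buffer is pinned, which is why the two logits buffers
    # above are page-locked rather than plain numpy arrays.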
    # Evaluation
    logger.info('***** Running Evaluation *****')
    logger.info(f"""  Num examples = {len(eval_dataset)}""")
    logger.info(f"""  Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
    logger.info('Total Number of Inferences = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f"""Evaluation metrics: {eval_metric}""")
| 27 | 1 |
'''simple docstring'''
import unittest
import numpy as np
from transformers import RoFormerConfig, is_flax_available
from transformers.testing_utils import require_flax, slow
from ...test_modeling_flax_common import FlaxModelTesterMixin, ids_tensor, random_attention_mask
if is_flax_available():
import jax.numpy as jnp
from transformers.models.roformer.modeling_flax_roformer import (
FlaxRoFormerForMaskedLM,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerModel,
)
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def __init__( self : Dict ,_a : int ,_a : Tuple=13 ,_a : str=7 ,_a : str=True ,_a : Optional[Any]=True ,_a : List[Any]=True ,_a : Optional[Any]=True ,_a : List[Any]=99 ,_a : Union[str, Any]=32 ,_a : Optional[int]=5 ,_a : Optional[int]=4 ,_a : Union[str, Any]=37 ,_a : List[str]="gelu" ,_a : Tuple=0.1 ,_a : str=0.1 ,_a : Optional[int]=512 ,_a : Union[str, Any]=16 ,_a : Dict=2 ,_a : Any=0.02 ,_a : List[Any]=4 ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : Any = batch_size
A_ : str = seq_length
A_ : Union[str, Any] = is_training
A_ : Any = use_attention_mask
A_ : str = use_token_type_ids
A_ : Dict = use_labels
A_ : List[str] = vocab_size
A_ : Optional[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : int = num_attention_heads
A_ : Optional[int] = intermediate_size
A_ : str = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : str = max_position_embeddings
A_ : Dict = type_vocab_size
A_ : List[str] = type_sequence_label_size
A_ : str = initializer_range
A_ : int = num_choices
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Union[str, Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : Dict = None
if self.use_attention_mask:
A_ : Optional[Any] = random_attention_mask([self.batch_size, self.seq_length] )
A_ : str = None
if self.use_token_type_ids:
A_ : Dict = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : List[Any] = RoFormerConfig(
vocab_size=self.vocab_size ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,max_position_embeddings=self.max_position_embeddings ,type_vocab_size=self.type_vocab_size ,is_decoder=_a ,initializer_range=self.initializer_range ,)
return config, input_ids, token_type_ids, attention_mask
def _a ( self : Dict ):
'''simple docstring'''
A_ : Any = self.prepare_config_and_inputs()
A_ , A_ , A_ , A_ : Union[str, Any] = config_and_inputs
A_ : Tuple = {"""input_ids""": input_ids, """token_type_ids""": token_type_ids, """attention_mask""": attention_mask}
return config, inputs_dict
@require_flax
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = True
a_ = (
(
FlaxRoFormerModel,
FlaxRoFormerForMaskedLM,
FlaxRoFormerForSequenceClassification,
FlaxRoFormerForTokenClassification,
FlaxRoFormerForMultipleChoice,
FlaxRoFormerForQuestionAnswering,
)
if is_flax_available()
else ()
)
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = FlaxRoFormerModelTester(self )
@slow
def _a ( self : Optional[Any] ):
'''simple docstring'''
for model_class_name in self.all_model_classes:
A_ : Optional[int] = model_class_name.from_pretrained("""junnyu/roformer_chinese_small""" ,from_pt=_a )
A_ : Union[str, Any] = model(np.ones((1, 1) ) )
self.assertIsNotNone(_a )
@require_flax
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = FlaxRoFormerForMaskedLM.from_pretrained("""junnyu/roformer_chinese_base""" )
A_ : Union[str, Any] = jnp.array([[0, 1, 2, 3, 4, 5]] )
A_ : List[Any] = model(_a )[0]
A_ : int = 50000
A_ : str = (1, 6, vocab_size)
self.assertEqual(output.shape ,_a )
A_ : Any = jnp.array(
[[[-0.1205, -1.0265, 0.2922], [-1.5134, 0.1974, 0.1519], [-5.0135, -3.9003, -0.8404]]] )
self.assertTrue(jnp.allclose(output[:, :3, :3] ,_a ,atol=1e-4 ) )
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
__magic_name__ = {
'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['ConvNextFeatureExtractor']
__magic_name__ = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
'ConvNextForImageClassification',
'ConvNextModel',
'ConvNextPreTrainedModel',
'ConvNextBackbone',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = [
'TFConvNextForImageClassification',
'TFConvNextModel',
'TFConvNextPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure)
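# Editor note (hedged sketch of the pattern, not part of this module): _LazyModule defers the
# heavy imports declared above until an attribute is first accessed, so e.g.
#
#     from transformers.models.convnext import ConvNextConfig  # triggers the real import
#
# only pays the torch or TF import cost for the backend that is actually used.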
| 27 | 1 |
'''simple docstring'''
import os
from typing import Optional
import fsspec
from fsspec.archive import AbstractArchiveFileSystem
from fsspec.utils import DEFAULT_BLOCK_SIZE
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """"""
a_ = (
None # protocol passed in prefix to the url. ex: "gzip", for gzip://file.txt::http://foo.bar/file.txt.gz
)
a_ = None # compression type in fsspec. ex: "gzip"
a_ = None # extension of the filename to strip. ex: "".gz" to get file.txt from file.txt.gz
def __init__( self : int ,_a : str = "" ,_a : Optional[str] = None ,_a : Optional[dict] = None ,**_a : Optional[Any] ):
'''simple docstring'''
super().__init__(self ,**_a )
# always open as "rb" since fsspec can then use the TextIOWrapper to make it work for "r" mode
A_ : Dict = fsspec.open(
_a ,mode="""rb""" ,protocol=_a ,compression=self.compression ,client_kwargs={
"""requote_redirect_url""": False, # see https://github.com/huggingface/datasets/pull/5459
"""trust_env""": True, # Enable reading proxy env variables.
**(target_options or {}).pop("""client_kwargs""" ,{} ), # To avoid issues if it was already passed.
} ,**(target_options or {}) ,)
A_ : Dict = os.path.basename(self.file.path.split("""::""" )[0] )
A_ : Dict = (
self.compressed_name[: self.compressed_name.rindex(""".""" )]
if """.""" in self.compressed_name
else self.compressed_name
)
A_ : int = None
@classmethod
def _a ( cls : List[str] ,_a : str ):
'''simple docstring'''
return super()._strip_protocol(_a ).lstrip("""/""" )
def _a ( self : Optional[Any] ):
'''simple docstring'''
if self.dir_cache is None:
A_ : Optional[Any] = {**self.file.fs.info(self.file.path ), """name""": self.uncompressed_name}
A_ : Optional[Any] = {f["""name"""]: f}
def _a ( self : Optional[int] ,_a : str ):
'''simple docstring'''
return self.file.open().read()
def _a ( self : Optional[int] ,_a : str ,_a : str = "rb" ,_a : Any=None ,_a : str=True ,_a : int=None ,**_a : List[Any] ,):
'''simple docstring'''
A_ : Dict = self._strip_protocol(_a )
if mode != "rb":
raise ValueError(f'Tried to read with mode {mode} on file {self.file.path} opened with mode \'rb\'' )
return self.file.open()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """bz2"""
a_ = """bz2"""
a_ = """.bz2"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """gzip"""
a_ = """gzip"""
a_ = """.gz"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """lz4"""
a_ = """lz4"""
a_ = """.lz4"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """xz"""
a_ = """xz"""
a_ = """.xz"""
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """zstd"""
a_ = """zstd"""
a_ = """.zst"""
def __init__( self : Union[str, Any] ,_a : str ,_a : str = "rb" ,_a : Optional[str] = None ,_a : Optional[dict] = None ,_a : int = DEFAULT_BLOCK_SIZE ,**_a : Union[str, Any] ,):
'''simple docstring'''
super().__init__(
fo=_a ,mode=_a ,target_protocol=_a ,target_options=_a ,block_size=_a ,**_a ,)
# We need to wrap the zstd decompressor to avoid this error in fsspec==2021.7.0 and zstandard==0.15.2:
#
# File "/Users/user/.virtualenvs/hf-datasets/lib/python3.7/site-packages/fsspec/core.py", line 145, in open
# out.close = close
# AttributeError: 'zstd.ZstdDecompressionReader' object attribute 'close' is read-only
#
# see https://github.com/intake/filesystem_spec/issues/725
A_ : Optional[Any] = self.file.__enter__
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Tuple ,_a : List[Any] ):
'''simple docstring'''
A_ : Tuple = file_
def __enter__( self : Tuple ):
'''simple docstring'''
self._file.__enter__()
return self
def __exit__( self : Union[str, Any] ,*_a : Tuple ,**_a : List[str] ):
'''simple docstring'''
self._file.__exit__(*_a ,**_a )
def __iter__( self : Any ):
'''simple docstring'''
return iter(self._file )
def _a ( self : Dict ):
'''simple docstring'''
return next(self._file )
def __getattr__( self : str ,_a : Tuple ):
'''simple docstring'''
return getattr(self._file ,_a )
def fixed_enter(*_a : Dict ,**_a : Dict ):
return WrappedFile(_enter(*_a ,**_a ) )
A_ : List[str] = fixed_enter
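# Editor sketch (assumes these filesystems have been registered with fsspec, as the datasets
# library does elsewhere): compressed files can then be read through chained URLs, reusing the
# example from the class docstring above:
#
#     import fsspec
#     with fsspec.open("gzip://file.txt::http://foo.bar/file.txt.gz", mode="rt") as f:
#         text = f.read()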
| 27 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
A_ : Dict = OwlViTTextConfig(**_a )
A_ : Dict = OwlViTVisionConfig(**_a )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
return cls.from_dict(_a ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
| 27 | 1 |
'''simple docstring'''
def perfect_cube(n: int) -> bool:
    # Round the floating-point cube root before comparing; without rounding,
    # 27 ** (1 / 3) == 3.0000000000000004 and the exact equality check fails.
    val = round(n ** (1 / 3))
    return val * val * val == n
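# Editor note: this assumes n >= 0; a negative input would need special handling first, since
# raising a negative float to a fractional power returns a complex number in Python.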
if __name__ == "__main__":
print(perfect_cube(27))
print(perfect_cube(4))
| 27 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
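# Editor note: the stacked tensor above has 4 "channels": a bilinearly shrunk view of the full
# mel spectrogram plus three randomly positioned chunks drawn from the front, middle and back
# thirds of the audio, giving CLAP's feature-fusion input of shape (4, chunk_frames, 64).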
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(speech, dtype=np.float32) for speech in raw_speech]
elif not is_batched and not isinstance(raw_speech, np.ndarray):
A_ : str = np.asarray(raw_speech, dtype=np.float32)
elif isinstance(raw_speech, np.ndarray) and raw_speech.dtype is np.dtype(np.float64):
A_ : Tuple = raw_speech.astype(np.float32)
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(feature, dtype=np.float32) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
| 27 | 1 |
'''simple docstring'''
from ...configuration_utils import PretrainedConfig
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'facebook/nllb-moe-54B': 'https://huggingface.co/facebook/nllb-moe-54b/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """nllb-moe"""
a_ = ["""past_key_values"""]
a_ = {"""num_attention_heads""": """encoder_attention_heads""", """hidden_size""": """d_model"""}
def __init__( self : Union[str, Any] ,_a : Dict=128112 ,_a : Union[str, Any]=1024 ,_a : Any=12 ,_a : Tuple=4096 ,_a : Optional[int]=16 ,_a : str=12 ,_a : Optional[Any]=4096 ,_a : List[Any]=16 ,_a : Dict=0.05 ,_a : Tuple=0.05 ,_a : List[Any]=True ,_a : str=True ,_a : Union[str, Any]="relu" ,_a : Optional[Any]=1024 ,_a : Optional[Any]=0.1 ,_a : Optional[Any]=0.1 ,_a : List[Any]=0.0 ,_a : Optional[Any]=0.02 ,_a : List[Any]=2 ,_a : Optional[int]=True ,_a : Optional[Any]=False ,_a : Union[str, Any]="float32" ,_a : Union[str, Any]=False ,_a : List[str]=128 ,_a : Optional[int]=64 ,_a : List[str]=4 ,_a : Tuple=4 ,_a : int=0.001 ,_a : Any=0.001 ,_a : Union[str, Any]="all" ,_a : int=False ,_a : List[str]=False ,_a : Dict=1.0 ,_a : Any=0.2 ,_a : Dict=1 ,_a : int=0 ,_a : Optional[Any]=2 ,_a : Any=False ,**_a : List[str] ,):
'''simple docstring'''
A_ : int = vocab_size
A_ : int = max_position_embeddings
A_ : Any = d_model
A_ : List[Any] = encoder_ffn_dim
A_ : Dict = encoder_layers
A_ : Union[str, Any] = encoder_attention_heads
A_ : int = decoder_ffn_dim
A_ : List[Any] = decoder_layers
A_ : Optional[int] = decoder_attention_heads
A_ : Tuple = dropout
A_ : int = attention_dropout
A_ : Optional[Any] = activation_dropout
A_ : Tuple = activation_function
A_ : Any = init_std
A_ : str = encoder_layerdrop
A_ : str = decoder_layerdrop
A_ : Optional[Any] = use_cache
A_ : int = encoder_layers
A_ : Dict = scale_embedding # scale factor will be sqrt(d_model) if True
A_ : Any = router_z_loss_coef
A_ : Tuple = router_aux_loss_coef
A_ : List[Any] = decoder_sparse_step
A_ : Optional[int] = encoder_sparse_step
A_ : Any = num_experts
A_ : Optional[Any] = expert_capacity
A_ : Optional[Any] = router_bias
if router_dtype not in ["float32", "float16", "bfloat16"]:
raise ValueError(f'`router_dtype` must be one of \'float32\', \'float16\' or \'bfloat16\', got {router_dtype}' )
A_ : Tuple = router_dtype
A_ : Any = router_ignore_padding_tokens
A_ : Any = batch_prioritized_routing
A_ : Tuple = second_expert_policy
A_ : Dict = normalize_router_prob_before_dropping
A_ : Any = moe_eval_capacity_token_fraction
A_ : Optional[Any] = moe_token_dropout
A_ : List[str] = output_router_logits
super().__init__(
pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,is_encoder_decoder=_a ,decoder_start_token_id=_a ,**_a ,)
| 27 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Union[str, Any] = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
A_ : int = {
"""input_ids""": input_ids,
"""token_type_ids""": token_type_ids,
"""head_mask""": head_mask,
}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (
(OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
if is_torch_available()
else ()
)
a_ = (
(OpenAIGPTLMHeadModel,) if is_torch_available() else ()
) # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
a_ = (
{
"""feature-extraction""": OpenAIGPTModel,
"""text-classification""": OpenAIGPTForSequenceClassification,
"""text-generation""": OpenAIGPTLMHeadModel,
"""zero-shot""": OpenAIGPTForSequenceClassification,
}
if is_torch_available()
else {}
)
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import _LazyModule
__magic_name__ = {'tokenization_byt5': ['ByT5Tokenizer']}
if TYPE_CHECKING:
    from .tokenization_byt5 import ByT5Tokenizer
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 |
'''simple docstring'''
import base64


def ascii85_encode(string: str) -> bytes:
    # Ascii85 (a85) codec helpers; the function names are editorial choices, since the
    # originals were lost in transformation.
    return base64.a85encode(string.encode("utf-8"))


def ascii85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("utf-8")
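# A minimal usage sketch (names defined above are editorial; the round-trip property is the
# safe check here):
#
#     encoded = ascii85_encode("some text")
#     assert ascii85_decode(encoded) == "some text"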
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 1 |
'''simple docstring'''
import argparse
import json
from collections import OrderedDict
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import PoolFormerConfig, PoolFormerForImageClassification, PoolFormerImageProcessor
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def replace_key_with_offset(key: str, offset: int, original_name: str, new_name: str):
    A_ : List[Any] = original_name.split(""".""")[0]
    A_ : str = key.split(""".""")
    A_ : List[Any] = int(key_list[key_list.index(to_find) - 2])
    A_ : List[Any] = int(key_list[key_list.index(to_find) - 1])
A_ : List[Any] = orig_block_num - offset
A_ : Optional[Any] = key.replace(F'{orig_block_num}.{layer_num}.{original_name}' , F'block.{new_block_num}.{layer_num}.{new_name}')
return key
def rename_keys(state_dict):
A_ : str = OrderedDict()
A_ , A_ : Tuple = 0, 0
for key, value in state_dict.items():
if key.startswith("""network"""):
A_ : List[str] = key.replace("""network""" , """poolformer.encoder""")
if "proj" in key:
# Works for the first embedding as well as the internal embedding layers
if key.endswith("""bias""") and "patch_embed" not in key:
patch_emb_offset += 1
A_ : Optional[int] = key[: key.find("""proj""")]
A_ : List[str] = key.replace(to_replace , F'patch_embeddings.{total_embed_found}.')
A_ : Tuple = key.replace("""proj""" , """projection""")
if key.endswith("""bias"""):
total_embed_found += 1
if "patch_embeddings" in key:
A_ : Tuple = """poolformer.encoder.""" + key
if "mlp.fc1" in key:
A_ : Any = replace_key_with_offset(lowerCamelCase , lowerCamelCase , """mlp.fc1""" , """output.conv1""")
if "mlp.fc2" in key:
A_ : str = replace_key_with_offset(lowerCamelCase , lowerCamelCase , """mlp.fc2""" , """output.conv2""")
if "norm1" in key:
A_ : Dict = replace_key_with_offset(lowerCamelCase , lowerCamelCase , """norm1""" , """before_norm""")
if "norm2" in key:
A_ : Any = replace_key_with_offset(lowerCamelCase , lowerCamelCase , """norm2""" , """after_norm""")
if "layer_scale_1" in key:
A_ : Any = replace_key_with_offset(lowerCamelCase , lowerCamelCase , """layer_scale_1""" , """layer_scale_1""")
if "layer_scale_2" in key:
A_ : List[Any] = replace_key_with_offset(lowerCamelCase , lowerCamelCase , """layer_scale_2""" , """layer_scale_2""")
if "head" in key:
A_ : int = key.replace("""head""" , """classifier""")
A_ : Optional[int] = value
return new_state_dict
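# Editor note (hypothetical key, for illustration only): rename_keys maps timm-style names onto
# the HF PoolFormer layout, e.g. "network.0.0.mlp.fc1.weight" becomes something like
# "poolformer.encoder.block.0.0.output.conv1.weight" after the block-offset arithmetic above.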
def prepare_img():
A_ : str = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(url , stream=True).raw)
return image
@torch.no_grad()
def convert_poolformer_checkpoint(model_name: str, checkpoint_path: str, pytorch_dump_folder_path: str):
A_ : List[str] = PoolFormerConfig()
# set attributes based on model_name
A_ : Tuple = """huggingface/label-files"""
A_ : List[Any] = model_name[-3:]
A_ : int = 1000
A_ : Tuple = """imagenet-1k-id2label.json"""
A_ : str = (1, 1000)
# set config attributes
A_ : Tuple = json.load(open(hf_hub_download(repo_id , filename , repo_type="""dataset""") , """r"""))
A_ : List[str] = {int(k): v for k, v in idalabel.items()}
A_ : Tuple = idalabel
A_ : Union[str, Any] = {v: k for k, v in idalabel.items()}
if size == "s12":
A_ : List[str] = [2, 2, 6, 2]
A_ : str = [64, 128, 320, 512]
A_ : Optional[Any] = 4.0
A_ : Tuple = 0.9
elif size == "s24":
A_ : str = [4, 4, 12, 4]
A_ : Any = [64, 128, 320, 512]
A_ : Any = 4.0
A_ : Union[str, Any] = 0.9
elif size == "s36":
A_ : Optional[int] = [6, 6, 18, 6]
A_ : List[Any] = [64, 128, 320, 512]
A_ : Optional[Any] = 4.0
A_ : str = 1E-6
A_ : Optional[int] = 0.9
elif size == "m36":
A_ : Optional[int] = [6, 6, 18, 6]
A_ : Optional[int] = [96, 192, 384, 768]
A_ : List[Any] = 4.0
A_ : List[str] = 1E-6
A_ : List[Any] = 0.95
elif size == "m48":
A_ : Optional[int] = [8, 8, 24, 8]
A_ : Optional[int] = [96, 192, 384, 768]
A_ : Dict = 4.0
A_ : List[str] = 1E-6
A_ : Tuple = 0.95
else:
raise ValueError(F'Size {size} not supported')
# load image processor
A_ : str = PoolFormerImageProcessor(crop_pct=crop_pct)
# Prepare image
A_ : Union[str, Any] = prepare_img()
A_ : Tuple = image_processor(images=image , return_tensors="""pt""").pixel_values
logger.info(F'Converting model {model_name}...')
# load original state dict
A_ : Optional[int] = torch.load(checkpoint_path , map_location=torch.device("""cpu"""))
# rename keys
A_ : Union[str, Any] = rename_keys(state_dict)
# create HuggingFace model and load state dict
A_ : Optional[int] = PoolFormerForImageClassification(config)
model.load_state_dict(state_dict)
model.eval()
# Define image processor
A_ : int = PoolFormerImageProcessor(crop_pct=crop_pct)
A_ : List[Any] = image_processor(images=prepare_img() , return_tensors="""pt""").pixel_values
# forward pass
A_ : str = model(pixel_values)
A_ : Optional[int] = outputs.logits
# define expected logit slices for different models
if size == "s12":
A_ : List[str] = torch.tensor([-0.3045, -0.6758, -0.4869])
elif size == "s24":
A_ : Optional[int] = torch.tensor([0.4402, -0.1374, -0.8045])
elif size == "s36":
A_ : Tuple = torch.tensor([-0.6080, -0.5133, -0.5898])
elif size == "m36":
A_ : str = torch.tensor([0.3952, 0.2263, -1.2668])
elif size == "m48":
A_ : Union[str, Any] = torch.tensor([0.1167, -0.0656, -0.3423])
else:
raise ValueError(F'Size {size} not supported')
# verify logits
assert logits.shape == expected_shape
assert torch.allclose(logits[0, :3] , expected_slice , atol=1E-2)
# finally, save model and image processor
logger.info(F'Saving PyTorch model and image processor to {pytorch_dump_folder_path}...')
Path(pytorch_dump_folder_path).mkdir(exist_ok=True)
model.save_pretrained(pytorch_dump_folder_path)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(pytorch_dump_folder_path)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser()
parser.add_argument(
'--model_name',
default='poolformer_s12',
type=str,
help='Name of the model you\'d like to convert.',
)
parser.add_argument(
'--checkpoint_path', default=None, type=str, help='Path to the original PyTorch checkpoint (.pth file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', default=None, type=str, help='Path to the folder to output PyTorch model.'
)
args = parser.parse_args()
convert_poolformer_checkpoint(args.model_name, args.checkpoint_path, args.pytorch_dump_folder_path)
| 27 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        A_ : Optional[Any] = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
def get_chinese_word(tokens: List[str]):
    A_ : Any = set()
    for token in tokens:
        A_ : str = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    A_ : Any = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set):
    if not chinese_word_set:
        return bert_tokens
    A_ : Any = max([len(w) for w in chinese_word_set])
    A_ : str = bert_tokens
    A_ , A_ : Any = 0, len(bert_tokens)
    while start < end:
        A_ : Tuple = True
        if is_chinese(bert_word[start]):
            A_ : List[str] = min(end - start , max_word_len)
            for i in range(max_word_len , 1 , -1):
A_ : Tuple = """""".join(bert_word[start : start + i])
if whole_word in chinese_word_set:
for j in range(start + 1 , start + i):
A_ : Dict = """##""" + bert_word[j]
A_ : str = start + i
A_ : Dict = False
break
if single_word:
start += 1
return bert_word
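# Editor note (illustrative): given bert_tokens ['中', '国', '人'] and chinese_word_set {'中国'},
# add_sub_symbol returns ['中', '##国', '人'], marking subword continuations for whole word masking.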
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
A_ : Union[str, Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : List[Any] = ltp_tokenizer.pipeline(lines[i : i + 100] , tasks=["""cws"""]).cws
A_ : int = [get_chinese_word(lowerCamelCase) for r in res]
ltp_res.extend(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : List[Any] = []
for i in range(0 , len(lowerCamelCase) , 100):
A_ : Dict = bert_tokenizer(lines[i : i + 100] , add_special_tokens=lowerCamelCase , truncation=lowerCamelCase , max_length=512)
bert_res.extend(res["""input_ids"""])
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Union[str, Any] = []
for input_ids, chinese_word in zip(lowerCamelCase , lowerCamelCase):
A_ : List[Any] = []
for id in input_ids:
A_ : List[Any] = bert_tokenizer._convert_id_to_token(lowerCamelCase)
input_tokens.append(lowerCamelCase)
A_ : int = add_sub_symbol(lowerCamelCase , lowerCamelCase)
A_ : str = []
# We only save positions of Chinese subwords starting with ##, which means they are part of a whole word.
for i, token in enumerate(lowerCamelCase):
if token[:2] == "##":
A_ : Optional[Any] = token[2:]
# save chinese tokens' pos
if len(lowerCamelCase) == 1 and _is_chinese_char(ord(lowerCamelCase)):
ref_id.append(lowerCamelCase)
ref_ids.append(lowerCamelCase)
assert len(lowerCamelCase) == len(lowerCamelCase)
return ref_ids
def main(args):
# For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
# If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
with open(args.file_name , """r""" , encoding="""utf-8""") as f:
A_ : Optional[int] = f.readlines()
A_ : Union[str, Any] = [line.strip() for line in data if len(lowerCamelCase) > 0 and not line.isspace()] # avoid delimiter like '\u2029'
    A_ : Optional[Any] = LTP(args.ltp) # faster on a GPU device
A_ : Dict = BertTokenizer.from_pretrained(args.bert)
A_ : str = prepare_ref(lowerCamelCase , lowerCamelCase , lowerCamelCase)
with open(args.save_path , """w""" , encoding="""utf-8""") as f:
A_ : Optional[Any] = [json.dumps(lowerCamelCase) + """\n""" for ref in ref_ids]
f.writelines(lowerCamelCase)
if __name__ == "__main__":
__magic_name__ = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
        help='file to process, same as the training data for the LM',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
        help='path to save the result',
)
__magic_name__ = parser.parse_args()
main(args)
| 27 | 1 |
'''simple docstring'''
import logging
from pathlib import Path
import numpy as np
import pytorch_lightning as pl
import torch
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
from pytorch_lightning.utilities import rank_zero_only
from utils_rag import save_json
def lowerCamelCase ( lowerCamelCase : Optional[Any]):
    A_ : Tuple = filter(lambda p: p.requires_grad , model.parameters())
A_ : List[str] = sum([np.prod(p.size()) for p in model_parameters])
return params
__magic_name__ = logging.getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : str):
if metric == "rouge2":
A_ : List[str] = """{val_avg_rouge2:.4f}-{step_count}"""
elif metric == "bleu":
A_ : List[str] = """{val_avg_bleu:.4f}-{step_count}"""
elif metric == "em":
A_ : Dict = """{val_avg_em:.4f}-{step_count}"""
else:
        raise NotImplementedError(
            F'seq2seq callbacks only support rouge2, bleu and em, got {metric}. You can make your own by adding to'
            """ this function.""")
A_ : Any = ModelCheckpoint(
dirpath=lowerCamelCase , filename=lowerCamelCase , monitor=F'val_{metric}' , mode="""max""" , save_top_k=3 , every_n_epochs=1 , )
return checkpoint_callback
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Optional[int]):
return EarlyStopping(
monitor=F'val_{metric}' , mode="""min""" if """loss""" in metric else """max""" , patience=lowerCamelCase , verbose=lowerCamelCase , )
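# Usage sketch (illustrative; get_checkpoint_callback / get_early_stopping_callback
# are the upstream names of the two factory functions above, and the trainer
# wiring below is an assumption, not code from this file):
#   checkpoint_cb = get_checkpoint_callback("output/", "rouge2")
#   es_cb = get_early_stopping_callback("rouge2", patience=3)
#   trainer = pl.Trainer(callbacks=[checkpoint_cb, es_cb])
# The checkpoint monitors `val_rouge2` in "max" mode, while early stopping flips
# to "min" mode only when the metric name contains "loss".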
class __lowerCAmelCase ( pl.Callback ):
'''simple docstring'''
def _a ( self : str ,_a : Optional[Any] ,_a : Optional[Any] ):
'''simple docstring'''
A_ : int = {f'lr_group_{i}': param["""lr"""] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups )}
pl_module.logger.log_metrics(_a )
@rank_zero_only
def _a ( self : Dict ,_a : pl.Trainer ,_a : pl.LightningModule ,_a : str ,_a : Union[str, Any]=True ):
'''simple docstring'''
logger.info(f'***** {type_path} results at step {trainer.global_step:05d} *****' )
A_ : Dict = trainer.callback_metrics
trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["""log""", """progress_bar""", """preds"""]} )
# Log results
A_ : str = Path(pl_module.hparams.output_dir )
if type_path == "test":
A_ : Dict = od / """test_results.txt"""
A_ : Optional[Any] = od / """test_generations.txt"""
else:
# this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json
# If people want this it will be easy enough to add back.
A_ : Union[str, Any] = od / f'{type_path}_results/{trainer.global_step:05d}.txt'
A_ : Optional[Any] = od / f'{type_path}_generations/{trainer.global_step:05d}.txt'
results_file.parent.mkdir(exist_ok=_a )
generations_file.parent.mkdir(exist_ok=_a )
with open(_a ,"""a+""" ) as writer:
for key in sorted(_a ):
if key in ["log", "progress_bar", "preds"]:
continue
A_ : Optional[int] = metrics[key]
if isinstance(_a ,torch.Tensor ):
A_ : Union[str, Any] = val.item()
A_ : Dict = f'{key}: {val:.6f}\n'
writer.write(_a )
if not save_generations:
return
if "preds" in metrics:
A_ : int = """\n""".join(metrics["""preds"""] )
generations_file.open("""w+""" ).write(_a )
@rank_zero_only
def _a ( self : Optional[int] ,_a : List[Any] ,_a : str ):
'''simple docstring'''
try:
A_ : Union[str, Any] = pl_module.model.model.num_parameters()
except AttributeError:
A_ : Tuple = pl_module.model.num_parameters()
A_ : str = count_trainable_parameters(_a )
# mp stands for million parameters
trainer.logger.log_metrics({"""n_params""": npars, """mp""": npars / 1e6, """grad_mp""": n_trainable_pars / 1e6} )
@rank_zero_only
def _a ( self : Optional[Any] ,_a : pl.Trainer ,_a : pl.LightningModule ):
'''simple docstring'''
save_json(pl_module.metrics ,pl_module.metrics_save_path )
return self._write_logs(_a ,_a ,"""test""" )
@rank_zero_only
def _a ( self : Union[str, Any] ,_a : pl.Trainer ,_a : List[Any] ):
'''simple docstring'''
save_json(pl_module.metrics ,pl_module.metrics_save_path )
# Uncommenting this will save val generations
# return self._write_logs(trainer, pl_module, "valid")
| 27 |
'''simple docstring'''
import warnings
from typing import List, Optional, Union
from ...processing_utils import ProcessorMixin
from ...tokenization_utils_base import BatchEncoding, PaddingStrategy, PreTokenizedInput, TextInput, TruncationStrategy
from ...utils import TensorType
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""image_processor""", """tokenizer"""]
a_ = """ViltImageProcessor"""
a_ = ("""BertTokenizer""", """BertTokenizerFast""")
def __init__( self : List[Any] ,_a : Optional[Any]=None ,_a : List[str]=None ,**_a : Any ):
'''simple docstring'''
A_ : Any = None
if "feature_extractor" in kwargs:
warnings.warn(
"""The `feature_extractor` argument is deprecated and will be removed in v5, use `image_processor`"""
""" instead.""" ,_a ,)
A_ : List[str] = kwargs.pop("""feature_extractor""" )
A_ : List[Any] = image_processor if image_processor is not None else feature_extractor
if image_processor is None:
raise ValueError("""You need to specify an `image_processor`.""" )
if tokenizer is None:
raise ValueError("""You need to specify a `tokenizer`.""" )
super().__init__(_a ,_a )
A_ : Optional[Any] = self.image_processor
def __call__( self : Any ,_a : Tuple ,_a : Union[TextInput, PreTokenizedInput, List[TextInput], List[PreTokenizedInput]] = None ,_a : bool = True ,_a : Union[bool, str, PaddingStrategy] = False ,_a : Union[bool, str, TruncationStrategy] = None ,_a : Optional[int] = None ,_a : int = 0 ,_a : Optional[int] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = False ,_a : bool = True ,_a : Optional[Union[str, TensorType]] = None ,**_a : Tuple ,):
'''simple docstring'''
A_ : int = self.tokenizer(
text=_a ,add_special_tokens=_a ,padding=_a ,truncation=_a ,max_length=_a ,stride=_a ,pad_to_multiple_of=_a ,return_token_type_ids=_a ,return_attention_mask=_a ,return_overflowing_tokens=_a ,return_special_tokens_mask=_a ,return_offsets_mapping=_a ,return_length=_a ,verbose=_a ,return_tensors=_a ,**_a ,)
# add pixel_values + pixel_mask
A_ : Optional[int] = self.image_processor(_a ,return_tensors=_a )
encoding.update(_a )
return encoding
def _a ( self : List[Any] ,*_a : Any ,**_a : Any ):
'''simple docstring'''
return self.tokenizer.batch_decode(*_a ,**_a )
def _a ( self : int ,*_a : int ,**_a : Optional[int] ):
'''simple docstring'''
return self.tokenizer.decode(*_a ,**_a )
@property
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : Optional[int] = self.tokenizer.model_input_names
A_ : str = self.image_processor.model_input_names
return list(dict.fromkeys(tokenizer_input_names + image_processor_input_names ) )
@property
def _a ( self : str ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor_class` is deprecated and will be removed in v5. Use `image_processor_class` instead.""" ,_a ,)
return self.image_processor_class
@property
def _a ( self : int ):
'''simple docstring'''
warnings.warn(
"""`feature_extractor` is deprecated and will be removed in v5. Use `image_processor` instead.""" ,_a ,)
return self.image_processor
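# Usage sketch (a minimal example; the checkpoint name and the upstream class name
# ViltProcessor are assumptions inferred from the attributes above):
#   processor = ViltProcessor.from_pretrained("dandelin/vilt-b32-finetuned-vqa")
#   encoding = processor(image, "How many cats are there?", return_tensors="pt")
# __call__ above merges the tokenizer output (input_ids, attention_mask, ...) with
# the image processor output (pixel_values, pixel_mask) into a single BatchEncoding.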
| 27 | 1 |
'''simple docstring'''
def lowerCamelCase ( lowerCamelCase : int):
    if not isinstance(lowerCamelCase , int):
        raise TypeError("""Input value must be a 'int' type""")
    if lowerCamelCase < 0:
        raise ValueError("""Input value must be a positive integer""")
return bin(lowerCamelCase).count("""1""")
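# Examples (doctest-style sketch for the population count above):
#   11 == 0b1011      -> 3 set bits
#   128 == 0b10000000 -> 1 set bit
#   0                 -> 0 set bits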
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class __lowerCAmelCase ( metaclass=__SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27 | 1 |
'''simple docstring'''
import os
from shutil import copyfile
from typing import Any, Dict, List, Optional, Tuple
import sentencepiece as spm
from ...tokenization_utils import PreTrainedTokenizer
from ...utils import logging
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {'vocab_file': 'spiece.model'}
__magic_name__ = {
'vocab_file': {
'bert_for_seq_generation': (
'https://huggingface.co/google/bert_for_seq_generation_L-24_bbc_encoder/resolve/main/spiece.model'
),
}
}
__magic_name__ = {'bert_for_seq_generation': 512}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = []
a_ = ["""input_ids""", """attention_mask"""]
def __init__( self : Any ,_a : Optional[Any] ,_a : Union[str, Any]="<s>" ,_a : int="</s>" ,_a : List[str]="<unk>" ,_a : Any="<pad>" ,_a : List[str]="<::::>" ,_a : Optional[Dict[str, Any]] = None ,**_a : str ,):
'''simple docstring'''
A_ : Optional[Any] = {} if sp_model_kwargs is None else sp_model_kwargs
# Add extra_ids to the special token list
super().__init__(
bos_token=_a ,eos_token=_a ,unk_token=_a ,pad_token=_a ,sep_token=_a ,sp_model_kwargs=self.sp_model_kwargs ,**_a ,)
A_ : List[str] = vocab_file
A_ : Dict = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(_a )
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.sp_model.get_piece_size()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = {self.convert_ids_to_tokens(_a ): i for i in range(self.vocab_size )}
vocab.update(self.added_tokens_encoder )
return vocab
def __getstate__( self : List[str] ):
'''simple docstring'''
A_ : List[Any] = self.__dict__.copy()
A_ : str = None
return state
def __setstate__( self : Tuple ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = d
# for backward compatibility
if not hasattr(self ,"""sp_model_kwargs""" ):
A_ : Optional[Any] = {}
A_ : Optional[Any] = spm.SentencePieceProcessor(**self.sp_model_kwargs )
self.sp_model.Load(self.vocab_file )
def _a ( self : Union[str, Any] ,_a : str ):
'''simple docstring'''
        return self.sp_model.encode(_a ,out_type=str )
def _a ( self : int ,_a : Optional[Any] ):
'''simple docstring'''
return self.sp_model.piece_to_id(_a )
def _a ( self : Any ,_a : int ):
'''simple docstring'''
A_ : Tuple = self.sp_model.IdToPiece(_a )
return token
def _a ( self : List[str] ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = []
A_ : Union[str, Any] = """"""
for token in tokens:
# make sure that special tokens are not decoded using sentencepiece model
if token in self.all_special_tokens:
out_string += self.sp_model.decode(_a ) + token
A_ : Dict = []
else:
current_sub_tokens.append(_a )
out_string += self.sp_model.decode(_a )
return out_string.strip()
def _a ( self : Dict ,_a : str ,_a : Optional[str] = None ):
'''simple docstring'''
if not os.path.isdir(_a ):
logger.error(f'Vocabulary path ({save_directory}) should be a directory' )
return
A_ : Tuple = os.path.join(
_a ,(filename_prefix + """-""" if filename_prefix else """""") + VOCAB_FILES_NAMES["""vocab_file"""] )
if os.path.abspath(self.vocab_file ) != os.path.abspath(_a ) and os.path.isfile(self.vocab_file ):
copyfile(self.vocab_file ,_a )
elif not os.path.isfile(self.vocab_file ):
with open(_a ,"""wb""" ) as fi:
A_ : Union[str, Any] = self.sp_model.serialized_model_proto()
fi.write(_a )
return (out_vocab_file,)
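# Usage sketch (illustrative; the checkpoint name comes from the vocab map above,
# the upstream class name BertGenerationTokenizer is inferred rather than defined
# in this file, and the printed pieces are an assumption about the model):
#   tok = BertGenerationTokenizer.from_pretrained(
#       "google/bert_for_seq_generation_L-24_bbc_encoder")
#   tok.tokenize("Hello world")  # -> pieces such as ["▁Hello", "▁world"]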
| 27 |
'''simple docstring'''
import itertools
import json
import linecache
import os
import pickle
import re
import socket
import string
from collections import Counter
from logging import getLogger
from pathlib import Path
from typing import Callable, Dict, Iterable, List
import git
import torch
from torch.utils.data import Dataset
from transformers import BartTokenizer, RagTokenizer, T5Tokenizer
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Tuple , lowerCamelCase : int , lowerCamelCase : Optional[Any] , lowerCamelCase : str=True , lowerCamelCase : Optional[Any]="pt"):
A_ : Optional[int] = {"""add_prefix_space""": True} if isinstance(lowerCamelCase , lowerCamelCase) and not line.startswith(""" """) else {}
A_ : Optional[int] = padding_side
return tokenizer(
[line] , max_length=lowerCamelCase , padding="""max_length""" if pad_to_max_length else None , truncation=lowerCamelCase , return_tensors=lowerCamelCase , add_special_tokens=lowerCamelCase , **lowerCamelCase , )
def lowerCamelCase ( lowerCamelCase : Optional[int] , lowerCamelCase : Union[str, Any] , lowerCamelCase : List[Any]=None , ):
A_ : Dict = input_ids.ne(lowerCamelCase).any(dim=0)
if attention_mask is None:
return input_ids[:, keep_column_mask]
else:
return (input_ids[:, keep_column_mask], attention_mask[:, keep_column_mask])
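# Worked example for trim_batch above (a sketch; pad_token_id = 0 and the tensor
# values are illustrative):
#   input_ids = [[5, 6, 0, 0],
#                [7, 0, 0, 0]]
# input_ids.ne(0).any(dim=0) gives the column mask [True, True, False, False], so
# the two all-padding columns on the right are dropped and the batch shrinks from
# shape (2, 4) to (2, 2).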
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Optional[Any] ,_a : Tuple ,_a : Dict ,_a : Tuple ,_a : Tuple="train" ,_a : Optional[int]=None ,_a : Any=None ,_a : int=None ,_a : Union[str, Any]="" ,):
'''simple docstring'''
super().__init__()
A_ : Union[str, Any] = Path(_a ).joinpath(type_path + """.source""" )
A_ : Any = Path(_a ).joinpath(type_path + """.target""" )
A_ : Dict = self.get_char_lens(self.src_file )
A_ : Optional[int] = max_source_length
A_ : List[str] = max_target_length
assert min(self.src_lens ) > 0, f'found empty line in {self.src_file}'
A_ : List[Any] = tokenizer
A_ : Optional[Any] = prefix
if n_obs is not None:
A_ : Any = self.src_lens[:n_obs]
A_ : Optional[int] = src_lang
A_ : Tuple = tgt_lang
def __len__( self : Tuple ):
'''simple docstring'''
return len(self.src_lens )
def __getitem__( self : List[str] ,_a : Tuple ):
'''simple docstring'''
A_ : int = index + 1 # linecache starts at 1
A_ : Union[str, Any] = self.prefix + linecache.getline(str(self.src_file ) ,_a ).rstrip("""\n""" )
A_ : Dict = linecache.getline(str(self.tgt_file ) ,_a ).rstrip("""\n""" )
assert source_line, f'empty source line for index {index}'
assert tgt_line, f'empty tgt line for index {index}'
# Need to add eos token manually for T5
if isinstance(self.tokenizer ,_a ):
source_line += self.tokenizer.eos_token
tgt_line += self.tokenizer.eos_token
# Pad source and target to the right
A_ : List[str] = (
self.tokenizer.question_encoder if isinstance(self.tokenizer ,_a ) else self.tokenizer
)
A_ : Any = self.tokenizer.generator if isinstance(self.tokenizer ,_a ) else self.tokenizer
A_ : Optional[int] = encode_line(_a ,_a ,self.max_source_length ,"""right""" )
A_ : Optional[int] = encode_line(_a ,_a ,self.max_target_length ,"""right""" )
A_ : Optional[Any] = source_inputs["""input_ids"""].squeeze()
A_ : Dict = target_inputs["""input_ids"""].squeeze()
A_ : Union[str, Any] = source_inputs["""attention_mask"""].squeeze()
return {
"input_ids": source_ids,
"attention_mask": src_mask,
"decoder_input_ids": target_ids,
}
@staticmethod
def _a ( _a : int ):
'''simple docstring'''
return [len(_a ) for x in Path(_a ).open().readlines()]
def _a ( self : Optional[int] ,_a : Dict ):
'''simple docstring'''
A_ : str = torch.stack([x["""input_ids"""] for x in batch] )
A_ : Optional[Any] = torch.stack([x["""attention_mask"""] for x in batch] )
A_ : str = torch.stack([x["""decoder_input_ids"""] for x in batch] )
A_ : Union[str, Any] = (
self.tokenizer.generator.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : str = (
self.tokenizer.question_encoder.pad_token_id
if isinstance(self.tokenizer ,_a )
else self.tokenizer.pad_token_id
)
A_ : List[str] = trim_batch(_a ,_a )
A_ , A_ : Union[str, Any] = trim_batch(_a ,_a ,attention_mask=_a )
A_ : List[str] = {
"""input_ids""": source_ids,
"""attention_mask""": source_mask,
"""decoder_input_ids""": y,
}
return batch
__magic_name__ = getLogger(__name__)
def lowerCamelCase ( lowerCamelCase : List[List]):
return list(itertools.chain.from_iterable(lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : str):
A_ : Union[str, Any] = get_git_info()
save_json(lowerCamelCase , os.path.join(lowerCamelCase , """git_log.json"""))
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[Any] , lowerCamelCase : List[str]=4 , **lowerCamelCase : List[str]):
with open(lowerCamelCase , """w""") as f:
json.dump(lowerCamelCase , lowerCamelCase , indent=lowerCamelCase , **lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : Any):
with open(lowerCamelCase) as f:
return json.load(lowerCamelCase)
def lowerCamelCase ( ):
A_ : List[str] = git.Repo(search_parent_directories=lowerCamelCase)
A_ : Union[str, Any] = {
"""repo_id""": str(lowerCamelCase),
"""repo_sha""": str(repo.head.object.hexsha),
"""repo_branch""": str(repo.active_branch),
"""hostname""": str(socket.gethostname()),
}
return repo_infos
def lowerCamelCase ( lowerCamelCase : Callable , lowerCamelCase : Iterable):
return list(map(lowerCamelCase , lowerCamelCase))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
with open(lowerCamelCase , """wb""") as f:
return pickle.dump(lowerCamelCase , lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str]):
def remove_articles(lowerCamelCase : Any):
return re.sub(r"""\b(a|an|the)\b""" , """ """ , lowerCamelCase)
def white_space_fix(lowerCamelCase : List[Any]):
return " ".join(text.split())
def remove_punc(lowerCamelCase : Union[str, Any]):
A_ : Optional[int] = set(string.punctuation)
return "".join(ch for ch in text if ch not in exclude)
def lower(lowerCamelCase : List[str]):
return text.lower()
return white_space_fix(remove_articles(remove_punc(lower(lowerCamelCase))))
def lowerCamelCase ( lowerCamelCase : int , lowerCamelCase : int):
A_ : Tuple = normalize_answer(lowerCamelCase).split()
A_ : Dict = normalize_answer(lowerCamelCase).split()
A_ : int = Counter(lowerCamelCase) & Counter(lowerCamelCase)
A_ : Any = sum(common.values())
if num_same == 0:
return 0
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = 1.0 * num_same / len(lowerCamelCase)
A_ : Any = (2 * precision * recall) / (precision + recall)
return fa
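# Worked example for the token-level F1 above (a sketch with made-up strings):
# hypothesis "the cat sat" vs. reference "cat sat down" -> after normalization the
# article "the" is dropped, the common tokens are {cat, sat}, so
# precision = 2/2, recall = 2/3 and F1 = 2 * 1.0 * (2/3) / (1.0 + 2/3) = 0.8.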
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : Any):
return normalize_answer(lowerCamelCase) == normalize_answer(lowerCamelCase)
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : List[str]):
assert len(lowerCamelCase) == len(lowerCamelCase)
A_ : Any = 0
for hypo, pred in zip(lowerCamelCase , lowerCamelCase):
em += exact_match_score(lowerCamelCase , lowerCamelCase)
if len(lowerCamelCase) > 0:
em /= len(lowerCamelCase)
return {"em": em}
def lowerCamelCase ( lowerCamelCase : Union[str, Any]):
return model_prefix.startswith("""rag""")
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int , lowerCamelCase : Union[str, Any]):
A_ : Optional[Any] = {p: p for p in extra_params}
# T5 models don't have `dropout` param, they have `dropout_rate` instead
A_ : Tuple = """dropout_rate"""
for p in extra_params:
if getattr(lowerCamelCase , lowerCamelCase , lowerCamelCase):
if not hasattr(lowerCamelCase , lowerCamelCase) and not hasattr(lowerCamelCase , equivalent_param[p]):
logger.info("""config doesn't have a `{}` attribute""".format(lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
continue
A_ : Tuple = p if hasattr(lowerCamelCase , lowerCamelCase) else equivalent_param[p]
setattr(lowerCamelCase , lowerCamelCase , getattr(lowerCamelCase , lowerCamelCase))
delattr(lowerCamelCase , lowerCamelCase)
return hparams, config
| 27 | 1 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
            # out_channels is double in_channels because the model predicts both the mean and the variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
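# Flow sketch of the slow test above (illustrative summary): the prior pipeline
# maps the text prompt to CLIP image embeddings, and the controlnet decoder then
# denoises from those embeddings while conditioning on the depth-hint tensor of
# shape (1, 3, H, W) scaled to [0, 1].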
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_sentencepiece_available,
is_tokenizers_available,
is_torch_available,
)
__magic_name__ = {}
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizer']
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
__magic_name__ = ['NllbTokenizerFast']
if TYPE_CHECKING:
try:
if not is_sentencepiece_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb import NllbTokenizer
try:
if not is_tokenizers_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .tokenization_nllb_fast import NllbTokenizerFast
else:
import sys
__magic_name__ = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
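# Usage sketch (illustrative): `from transformers.models.nllb import NllbTokenizer`
# resolves through the _LazyModule above, so tokenization_nllb is only imported on
# first attribute access and only if sentencepiece is installed.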
| 27 | 1 |
'''simple docstring'''
import inspect
import math
import tempfile
import unittest
import numpy as np
from transformers import ViTMAEConfig
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from transformers.utils import cached_property, is_torch_available, is_vision_available
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, floats_tensor, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from torch import nn
from transformers import ViTMAEForPreTraining, ViTMAEModel
from transformers.models.vit.modeling_vit import VIT_PRETRAINED_MODEL_ARCHIVE_LIST
if is_vision_available():
from PIL import Image
from transformers import ViTImageProcessor
class __lowerCAmelCase :
'''simple docstring'''
def __init__( self : Optional[int] ,_a : Tuple ,_a : Dict=13 ,_a : Tuple=30 ,_a : Dict=2 ,_a : List[Any]=3 ,_a : List[Any]=True ,_a : Dict=True ,_a : Optional[Any]=32 ,_a : List[str]=5 ,_a : List[Any]=4 ,_a : str=37 ,_a : List[Any]="gelu" ,_a : Dict=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=10 ,_a : Optional[Any]=0.02 ,_a : Dict=3 ,_a : List[str]=0.6 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : List[str] = parent
A_ : Optional[int] = batch_size
A_ : Any = image_size
A_ : Tuple = patch_size
A_ : Any = num_channels
A_ : Dict = is_training
A_ : str = use_labels
A_ : List[str] = hidden_size
A_ : Optional[Any] = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : Optional[Any] = intermediate_size
A_ : str = hidden_act
A_ : Union[str, Any] = hidden_dropout_prob
A_ : Optional[int] = attention_probs_dropout_prob
A_ : List[str] = type_sequence_label_size
A_ : Optional[int] = initializer_range
A_ : Optional[Any] = mask_ratio
A_ : Optional[Any] = scope
# in ViTMAE, the expected sequence length = (num_patches + 1) * (1 - config.mask_ratio), rounded above
# (we add 1 for the [CLS] token)
A_ : Optional[Any] = (image_size // patch_size) ** 2
A_ : Any = int(math.ceil((1 - mask_ratio) * (num_patches + 1) ) )
def _a ( self : str ):
'''simple docstring'''
A_ : Dict = floats_tensor([self.batch_size, self.num_channels, self.image_size, self.image_size] )
A_ : Tuple = None
if self.use_labels:
A_ : str = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Any = self.get_config()
return config, pixel_values, labels
def _a ( self : List[Any] ):
'''simple docstring'''
return ViTMAEConfig(
image_size=self.image_size ,patch_size=self.patch_size ,num_channels=self.num_channels ,hidden_size=self.hidden_size ,num_hidden_layers=self.num_hidden_layers ,num_attention_heads=self.num_attention_heads ,intermediate_size=self.intermediate_size ,hidden_act=self.hidden_act ,hidden_dropout_prob=self.hidden_dropout_prob ,attention_probs_dropout_prob=self.attention_probs_dropout_prob ,is_decoder=_a ,initializer_range=self.initializer_range ,mask_ratio=self.mask_ratio ,)
def _a ( self : Dict ,_a : List[Any] ,_a : str ,_a : List[Any] ):
'''simple docstring'''
A_ : int = ViTMAEModel(config=_a )
model.to(_a )
model.eval()
A_ : Tuple = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Union[str, Any] ,_a : Any ,_a : int ,_a : List[Any] ):
'''simple docstring'''
A_ : Union[str, Any] = ViTMAEForPreTraining(_a )
model.to(_a )
model.eval()
A_ : Optional[Any] = model(_a )
A_ : Any = (self.image_size // self.patch_size) ** 2
A_ : str = self.patch_size**2 * self.num_channels
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
# test greyscale images
A_ : int = 1
A_ : Tuple = ViTMAEForPreTraining(_a )
model.to(_a )
model.eval()
A_ : str = floats_tensor([self.batch_size, 1, self.image_size, self.image_size] )
A_ : str = model(_a )
A_ : Any = self.patch_size**2
self.parent.assertEqual(result.logits.shape ,(self.batch_size, num_patches, expected_num_channels) )
def _a ( self : Any ):
'''simple docstring'''
A_ : List[str] = self.prepare_config_and_inputs()
A_ , A_ , A_ : Dict = config_and_inputs
A_ : List[str] = {"""pixel_values""": pixel_values}
return config, inputs_dict
@require_torch
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = (ViTMAEModel, ViTMAEForPreTraining) if is_torch_available() else ()
a_ = {"""feature-extraction""": ViTMAEModel} if is_torch_available() else {}
a_ = False
a_ = False
a_ = False
a_ = False
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = ViTMAEModelTester(self )
A_ : str = ConfigTester(self ,config_class=_a ,has_text_modality=_a ,hidden_size=37 )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
self.config_tester.run_common_tests()
@unittest.skip(reason="""ViTMAE does not use inputs_embeds""" )
def _a ( self : Any ):
'''simple docstring'''
pass
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ , A_ : Dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_a )
self.assertIsInstance(model.get_input_embeddings() ,(nn.Module) )
A_ : Dict = model.get_output_embeddings()
self.assertTrue(x is None or isinstance(_a ,nn.Linear ) )
def _a ( self : str ):
'''simple docstring'''
A_ , A_ : int = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : str = model_class(_a )
A_ : List[Any] = inspect.signature(model.forward )
# signature.parameters is an OrderedDict => so arg_names order is deterministic
A_ : Union[str, Any] = [*signature.parameters.keys()]
A_ : Optional[int] = ["""pixel_values"""]
self.assertListEqual(arg_names[:1] ,_a )
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_model(*_a )
def _a ( self : str ):
'''simple docstring'''
A_ : Union[str, Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_for_pretraining(*_a )
def _a ( self : str ,_a : str ,_a : str ,_a : Any ):
'''simple docstring'''
np.random.seed(2 )
A_ : str = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2 )
A_ : int = np.random.uniform(size=(self.model_tester.batch_size, num_patches) )
A_ : Optional[Any] = torch.from_numpy(_a )
# Add `noise` argument.
# PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
A_ : List[Any] = pt_noise
super().check_pt_tf_models(_a ,_a ,_a )
def _a ( self : int ):
'''simple docstring'''
A_ , A_ : Any = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
A_ : List[str] = model_class(_a )
model.to(_a )
model.eval()
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ : Dict = model(**self._prepare_for_class(_a ,_a ) )
A_ : Tuple = outputs[0].cpu().numpy()
A_ : Optional[Any] = 0
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(_a )
A_ : Union[str, Any] = model_class.from_pretrained(_a )
model.to(_a )
# make random mask reproducible
torch.manual_seed(2 )
with torch.no_grad():
A_ : Union[str, Any] = model(**self._prepare_for_class(_a ,_a ) )
# Make sure we don't have nans
A_ : Optional[int] = after_outputs[0].cpu().numpy()
A_ : Tuple = 0
A_ : int = np.amax(np.abs(out_a - out_a ) )
self.assertLessEqual(_a ,1e-5 )
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _a ( self : List[Any] ):
'''simple docstring'''
pass
@unittest.skip(
reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load
to get deterministic results.""" )
def _a ( self : Optional[int] ):
'''simple docstring'''
pass
@unittest.skip(reason="""ViTMAE returns a random mask + ids_restore in each forward pass. See test_save_load""" )
def _a ( self : Dict ):
'''simple docstring'''
pass
@unittest.skip("""Will be fixed soon by reducing the size of the model used for common tests.""" )
def _a ( self : Tuple ):
'''simple docstring'''
pass
@slow
def _a ( self : int ):
'''simple docstring'''
for model_name in VIT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Optional[int] = ViTMAEModel.from_pretrained(_a )
self.assertIsNotNone(_a )
def lowerCamelCase ( ):
A_ : Optional[int] = Image.open("""./tests/fixtures/tests_samples/COCO/000000039769.png""")
return image
@require_torch
@require_vision
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
@cached_property
def _a ( self : Union[str, Any] ):
'''simple docstring'''
return ViTImageProcessor.from_pretrained("""facebook/vit-mae-base""" ) if is_vision_available() else None
@slow
def _a ( self : int ):
'''simple docstring'''
np.random.seed(2 )
A_ : Optional[Any] = ViTMAEForPreTraining.from_pretrained("""facebook/vit-mae-base""" ).to(_a )
A_ : Dict = self.default_image_processor
A_ : Union[str, Any] = prepare_img()
A_ : List[Any] = image_processor(images=_a ,return_tensors="""pt""" ).to(_a )
# prepare a noise vector that will be also used for testing the TF model
# (this way we can ensure that the PT and TF models operate on the same inputs)
A_ : Optional[int] = ViTMAEConfig()
A_ : int = int((vit_mae_config.image_size // vit_mae_config.patch_size) ** 2 )
A_ : Dict = np.random.uniform(size=(1, num_patches) )
# forward pass
with torch.no_grad():
A_ : Tuple = model(**_a ,noise=torch.from_numpy(_a ).to(device=_a ) )
# verify the logits
A_ : Any = torch.Size((1, 196, 768) )
self.assertEqual(outputs.logits.shape ,_a )
A_ : int = torch.tensor(
[[-0.0548, -1.7023, -0.9325], [0.3721, -0.5670, -0.2233], [0.8235, -1.3878, -0.3524]] )
self.assertTrue(torch.allclose(outputs.logits[0, :3, :3] ,expected_slice.to(_a ) ,atol=1e-4 ) )
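# Numeric sketch of the masking arithmetic above (illustrative): with the tester
# defaults image_size=30 and patch_size=2 there are (30 // 2) ** 2 = 225 patches,
# and with mask_ratio=0.6 the visible sequence length is
# ceil(0.4 * (225 + 1)) = 91 tokens, matching seq_length in ViTMAEModelTester.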
| 27 |
'''simple docstring'''
import gc
import random
import unittest
import numpy as np
import torch
from diffusers import (
DDIMScheduler,
KandinskyVaaControlnetPipeline,
KandinskyVaaPriorPipeline,
UNetaDConditionModel,
VQModel,
)
from diffusers.utils import floats_tensor, load_image, load_numpy, slow, torch_device
from diffusers.utils.testing_utils import enable_full_determinism, require_torch_gpu
from ..test_pipelines_common import PipelineTesterMixin, assert_mean_pixel_difference
enable_full_determinism()
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE , unittest.TestCase ):
'''simple docstring'''
a_ = KandinskyVaaControlnetPipeline
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = ["""image_embeds""", """negative_image_embeds""", """hint"""]
a_ = [
"""generator""",
"""height""",
"""width""",
"""latents""",
"""guidance_scale""",
"""num_inference_steps""",
"""return_dict""",
"""guidance_scale""",
"""num_images_per_prompt""",
"""output_type""",
"""return_dict""",
]
a_ = False
@property
def _a ( self : Any ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return 32
@property
def _a ( self : Tuple ):
'''simple docstring'''
return self.time_input_dim
@property
def _a ( self : str ):
'''simple docstring'''
return self.time_input_dim * 4
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 100
@property
def _a ( self : List[Any] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : List[Any] = {
"""in_channels""": 8,
            # out_channels is double in_channels because the model predicts both the mean and the variance
"""out_channels""": 8,
"""addition_embed_type""": """image_hint""",
"""down_block_types""": ("""ResnetDownsampleBlock2D""", """SimpleCrossAttnDownBlock2D"""),
"""up_block_types""": ("""SimpleCrossAttnUpBlock2D""", """ResnetUpsampleBlock2D"""),
"""mid_block_type""": """UNetMidBlock2DSimpleCrossAttn""",
"""block_out_channels""": (self.block_out_channels_a, self.block_out_channels_a * 2),
"""layers_per_block""": 1,
"""encoder_hid_dim""": self.text_embedder_hidden_size,
"""encoder_hid_dim_type""": """image_proj""",
"""cross_attention_dim""": self.cross_attention_dim,
"""attention_head_dim""": 4,
"""resnet_time_scale_shift""": """scale_shift""",
"""class_embed_type""": None,
}
A_ : Tuple = UNetaDConditionModel(**_a )
return model
@property
def _a ( self : List[str] ):
'''simple docstring'''
return {
"block_out_channels": [32, 32, 64, 64],
"down_block_types": [
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"DownEncoderBlock2D",
"AttnDownEncoderBlock2D",
],
"in_channels": 3,
"latent_channels": 4,
"layers_per_block": 1,
"norm_num_groups": 8,
"norm_type": "spatial",
"num_vq_embeddings": 12,
"out_channels": 3,
"up_block_types": ["AttnUpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D", "UpDecoderBlock2D"],
"vq_embed_dim": 4,
}
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
torch.manual_seed(0 )
A_ : int = VQModel(**self.dummy_movq_kwargs )
return model
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Optional[Any] = self.dummy_unet
A_ : int = self.dummy_movq
A_ : Tuple = DDIMScheduler(
num_train_timesteps=1000 ,beta_schedule="""linear""" ,beta_start=0.00085 ,beta_end=0.012 ,clip_sample=_a ,set_alpha_to_one=_a ,steps_offset=1 ,prediction_type="""epsilon""" ,thresholding=_a ,)
A_ : int = {
"""unet""": unet,
"""scheduler""": scheduler,
"""movq""": movq,
}
return components
def _a ( self : Dict ,_a : str ,_a : Union[str, Any]=0 ):
'''simple docstring'''
A_ : Dict = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(_a ) ).to(_a )
A_ : int = floats_tensor((1, self.text_embedder_hidden_size) ,rng=random.Random(seed + 1 ) ).to(
_a )
# create hint
A_ : List[Any] = floats_tensor((1, 3, 64, 64) ,rng=random.Random(_a ) ).to(_a )
if str(_a ).startswith("""mps""" ):
A_ : Optional[Any] = torch.manual_seed(_a )
else:
A_ : str = torch.Generator(device=_a ).manual_seed(_a )
A_ : List[Any] = {
"""image_embeds""": image_embeds,
"""negative_image_embeds""": negative_image_embeds,
"""hint""": hint,
"""generator""": generator,
"""height""": 64,
"""width""": 64,
"""guidance_scale""": 4.0,
"""num_inference_steps""": 2,
"""output_type""": """np""",
}
return inputs
def _a ( self : Dict ):
'''simple docstring'''
A_ : List[Any] = """cpu"""
A_ : List[str] = self.get_dummy_components()
A_ : Tuple = self.pipeline_class(**_a )
A_ : Dict = pipe.to(_a )
pipe.set_progress_bar_config(disable=_a )
A_ : Tuple = pipe(**self.get_dummy_inputs(_a ) )
A_ : Tuple = output.images
A_ : Optional[Any] = pipe(
**self.get_dummy_inputs(_a ) ,return_dict=_a ,)[0]
A_ : Tuple = image[0, -3:, -3:, -1]
A_ : Any = image_from_tuple[0, -3:, -3:, -1]
assert image.shape == (1, 64, 64, 3)
A_ : List[Any] = np.array(
[0.6959826, 0.868279, 0.7558092, 0.68769467, 0.85805804, 0.65977496, 0.44885302, 0.5959111, 0.4251595] )
assert (
np.abs(image_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_slice.flatten()}'
assert (
np.abs(image_from_tuple_slice.flatten() - expected_slice ).max() < 1e-2
), f' expected_slice {expected_slice}, but got {image_from_tuple_slice.flatten()}'
@slow
@require_torch_gpu
class __lowerCAmelCase ( unittest.TestCase ):
'''simple docstring'''
def _a ( self : Tuple ):
'''simple docstring'''
super().tearDown()
gc.collect()
torch.cuda.empty_cache()
def _a ( self : Any ):
'''simple docstring'''
A_ : Tuple = load_numpy(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/kandinskyv22_controlnet_robotcat_fp16.npy""" )
A_ : Optional[int] = load_image(
"""https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main"""
"""/kandinskyv22/hint_image_cat.png""" )
A_ : Optional[int] = torch.from_numpy(np.array(_a ) ).float() / 255.0
A_ : List[Any] = hint.permute(2 ,0 ,1 ).unsqueeze(0 )
A_ : List[Any] = KandinskyVaaPriorPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-prior""" ,torch_dtype=torch.floataa )
pipe_prior.to(_a )
A_ : Union[str, Any] = KandinskyVaaControlnetPipeline.from_pretrained(
"""kandinsky-community/kandinsky-2-2-controlnet-depth""" ,torch_dtype=torch.floataa )
A_ : Union[str, Any] = pipeline.to(_a )
pipeline.set_progress_bar_config(disable=_a )
A_ : Optional[Any] = """A robot, 4k photo"""
A_ : Any = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ , A_ : List[str] = pipe_prior(
_a ,generator=_a ,num_inference_steps=5 ,negative_prompt="""""" ,).to_tuple()
A_ : int = torch.Generator(device="""cuda""" ).manual_seed(0 )
A_ : List[Any] = pipeline(
image_embeds=_a ,negative_image_embeds=_a ,hint=_a ,generator=_a ,num_inference_steps=100 ,output_type="""np""" ,)
A_ : Dict = output.images[0]
assert image.shape == (512, 512, 3)
assert_mean_pixel_difference(_a ,_a )
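# Flow sketch of the slow test above (illustrative summary): the prior pipeline
# maps the text prompt to CLIP image embeddings, and the controlnet decoder then
# denoises from those embeddings while conditioning on the depth-hint tensor of
# shape (1, 3, H, W) scaled to [0, 1].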
| 27 | 1 |
'''simple docstring'''
import collections.abc
from typing import Optional, Tuple, Union
import torch
import torch.utils.checkpoint
from torch import nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss
from ...activations import ACTaFN
from ...modeling_outputs import BaseModelOutputWithNoAttention, ImageClassifierOutputWithNoAttention
from ...modeling_utils import PreTrainedModel
from ...utils import add_code_sample_docstrings, add_start_docstrings, add_start_docstrings_to_model_forward, logging
from .configuration_poolformer import PoolFormerConfig
__magic_name__ = logging.get_logger(__name__)
# General docstring
__magic_name__ = 'PoolFormerConfig'
# Base docstring
__magic_name__ = 'sail/poolformer_s12'
__magic_name__ = [1, 512, 7, 7]
# Image classification docstring
__magic_name__ = 'sail/poolformer_s12'
__magic_name__ = 'tabby, tabby cat'
__magic_name__ = [
'sail/poolformer_s12',
# See all PoolFormer models at https://huggingface.co/models?filter=poolformer
]
def lowerCamelCase ( lowerCamelCase : Any , lowerCamelCase : float = 0.0 , lowerCamelCase : bool = False):
if drop_prob == 0.0 or not training:
return input
A_ : int = 1 - drop_prob
A_ : str = (input.shape[0],) + (1,) * (input.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
A_ : int = keep_prob + torch.rand(lowerCamelCase , dtype=input.dtype , device=input.device)
random_tensor.floor_() # binarize
A_ : List[Any] = input.div(lowerCamelCase) * random_tensor
return output
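# Numeric sketch of the stochastic-depth rule above (illustrative): with
# drop_prob=0.2 and training=True, keep_prob=0.8, so each sample keeps its
# residual branch with probability 0.8 and surviving activations are divided by
# 0.8, leaving the expected value of the output unchanged.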
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_a : Optional[float] = None ):
'''simple docstring'''
super().__init__()
A_ : str = drop_prob
def _a ( self : Union[str, Any] ,_a : torch.Tensor ):
'''simple docstring'''
return drop_path(_a ,self.drop_prob ,self.training )
def _a ( self : Dict ):
'''simple docstring'''
return "p={}".format(self.drop_prob )
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any ,_a : Dict ,_a : Optional[int] ,_a : Any ,_a : Optional[int] ,_a : Tuple ,_a : str=None ):
'''simple docstring'''
super().__init__()
A_ : str = patch_size if isinstance(_a ,collections.abc.Iterable ) else (patch_size, patch_size)
A_ : Dict = stride if isinstance(_a ,collections.abc.Iterable ) else (stride, stride)
A_ : Any = padding if isinstance(_a ,collections.abc.Iterable ) else (padding, padding)
A_ : List[Any] = nn.Convad(_a ,_a ,kernel_size=_a ,stride=_a ,padding=_a )
A_ : Dict = norm_layer(_a ) if norm_layer else nn.Identity()
def _a ( self : List[str] ,_a : int ):
'''simple docstring'''
A_ : str = self.projection(_a )
A_ : Tuple = self.norm(_a )
return embeddings
class __lowerCAmelCase ( nn.GroupNorm ):
'''simple docstring'''
def __init__( self : Union[str, Any] ,_a : Optional[int] ,**_a : Dict ):
'''simple docstring'''
super().__init__(1 ,_a ,**_a )
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[Any] ,_a : int ):
'''simple docstring'''
super().__init__()
A_ : Tuple = nn.AvgPoolad(_a ,stride=1 ,padding=pool_size // 2 ,count_include_pad=_a )
def _a ( self : Dict ,_a : Optional[int] ):
'''simple docstring'''
return self.pool(_a ) - hidden_states
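# Design note (sketch): returning pool(x) - x makes average pooling act as the
# PoolFormer token mixer in place of self-attention; since PoolFormerLayer adds
# hidden_states back through its residual connection, the net effect is plain
# average pooling, written this way so the block keeps an attention-like API.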
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Any ,_a : List[str] ,_a : Any ,_a : int ,_a : int ):
'''simple docstring'''
super().__init__()
A_ : Dict = nn.Convad(_a ,_a ,1 )
A_ : Union[str, Any] = nn.Convad(_a ,_a ,1 )
A_ : Optional[Any] = PoolFormerDropPath(_a )
if isinstance(config.hidden_act ,_a ):
A_ : Union[str, Any] = ACTaFN[config.hidden_act]
else:
A_ : Dict = config.hidden_act
def _a ( self : str ,_a : Tuple ):
'''simple docstring'''
A_ : Any = self.conva(_a )
A_ : str = self.act_fn(_a )
A_ : Optional[int] = self.drop(_a )
A_ : List[str] = self.conva(_a )
A_ : List[str] = self.drop(_a )
return hidden_states
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : List[Any] ,_a : Any ,_a : Tuple ,_a : List[Any] ,_a : Tuple ,_a : Optional[Any] ,_a : List[Any] ):
'''simple docstring'''
super().__init__()
A_ : List[Any] = PoolFormerPooling(_a )
A_ : str = PoolFormerOutput(_a ,_a ,_a ,_a )
A_ : List[Any] = PoolFormerGroupNorm(_a )
A_ : Optional[Any] = PoolFormerGroupNorm(_a )
# Useful for training neural nets
A_ : Optional[int] = PoolFormerDropPath(_a ) if drop_path > 0.0 else nn.Identity()
A_ : Any = config.use_layer_scale
if config.use_layer_scale:
A_ : Optional[int] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) ,requires_grad=_a )
A_ : Union[str, Any] = nn.Parameter(
config.layer_scale_init_value * torch.ones((_a) ) ,requires_grad=_a )
def _a ( self : Optional[int] ,_a : Any ):
'''simple docstring'''
if self.use_layer_scale:
A_ : Optional[Any] = self.pooling(self.before_norm(_a ) )
A_ : str = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * pooling_output
# First residual connection
A_ : Optional[int] = hidden_states + self.drop_path(_a )
A_ : Optional[Any] = ()
A_ : List[Any] = self.output(self.after_norm(_a ) )
A_ : str = self.layer_scale_a.unsqueeze(-1 ).unsqueeze(-1 ) * layer_output
# Second residual connection
A_ : Union[str, Any] = hidden_states + self.drop_path(_a )
A_ : Dict = (output,) + outputs
return outputs
else:
A_ : Union[str, Any] = self.drop_path(self.pooling(self.before_norm(_a ) ) )
# First residual connection
A_ : Any = pooling_output + hidden_states
A_ : List[str] = ()
# Second residual connection inside the PoolFormerOutput block
A_ : List[Any] = self.drop_path(self.output(self.after_norm(_a ) ) )
A_ : str = hidden_states + layer_output
A_ : Optional[Any] = (output,) + outputs
return outputs
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
super().__init__()
A_ : Any = config
# stochastic depth decay rule
A_ : Dict = [x.item() for x in torch.linspace(0 ,config.drop_path_rate ,sum(config.depths ) )]
# patch embeddings
A_ : Tuple = []
for i in range(config.num_encoder_blocks ):
embeddings.append(
PoolFormerEmbeddings(
patch_size=config.patch_sizes[i] ,stride=config.strides[i] ,padding=config.padding[i] ,num_channels=config.num_channels if i == 0 else config.hidden_sizes[i - 1] ,hidden_size=config.hidden_sizes[i] ,) )
A_ : Optional[Any] = nn.ModuleList(_a )
# Transformer blocks
A_ : Tuple = []
A_ : int = 0
for i in range(config.num_encoder_blocks ):
# each block consists of layers
A_ : str = []
if i != 0:
cur += config.depths[i - 1]
for j in range(config.depths[i] ):
layers.append(
PoolFormerLayer(
_a ,num_channels=config.hidden_sizes[i] ,pool_size=config.pool_size ,hidden_size=config.hidden_sizes[i] ,intermediate_size=int(config.hidden_sizes[i] * config.mlp_ratio ) ,drop_path=dpr[cur + j] ,) )
blocks.append(nn.ModuleList(_a ) )
A_ : List[Any] = nn.ModuleList(_a )
def _a ( self : Tuple ,_a : List[str] ,_a : List[Any]=False ,_a : Dict=True ):
'''simple docstring'''
A_ : List[Any] = () if output_hidden_states else None
A_ : str = pixel_values
for idx, layers in enumerate(zip(self.patch_embeddings ,self.block ) ):
A_ , A_ : List[str] = layers
# Get patch embeddings from hidden_states
A_ : Any = embedding_layer(_a )
# Send the embeddings through the blocks
for _, blk in enumerate(_a ):
A_ : Tuple = blk(_a )
A_ : List[str] = layer_outputs[0]
if output_hidden_states:
A_ : Union[str, Any] = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states] if v is not None )
return BaseModelOutputWithNoAttention(last_hidden_state=_a ,hidden_states=_a )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = PoolFormerConfig
a_ = """poolformer"""
a_ = """pixel_values"""
a_ = True
def _a ( self : int ,_a : List[str] ):
'''simple docstring'''
if isinstance(_a ,(nn.Linear, nn.Convad) ):
module.weight.data.normal_(mean=0.0 ,std=self.config.initializer_range )
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(_a ,nn.LayerNorm ):
module.bias.data.zero_()
module.weight.data.fill_(1.0 )
def _a ( self : Any ,_a : Optional[int] ,_a : List[str]=False ):
'''simple docstring'''
if isinstance(_a ,_a ):
A_ : int = value
__magic_name__ = r'\n This model is a PyTorch [torch.nn.Module](https://pytorch.org/docs/stable/nn.html#torch.nn.Module) sub-class. Use\n it as a regular PyTorch Module and refer to the PyTorch documentation for all matter related to general usage and\n behavior.\n\n Parameters:\n config ([`PoolFormerConfig`]): Model configuration class with all the parameters of the model.\n Initializing with a config file does not load the weights associated with the model, only the\n configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.\n'
__magic_name__ = r'\n Args:\n pixel_values (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):\n Pixel values. Pixel values can be obtained using [`AutoImageProcessor`]. See\n [`PoolFormerImageProcessor.__call__`] for details.\n'
@add_start_docstrings(
"""The bare PoolFormer Model transformer outputting raw hidden-states without any specific head on top.""" , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : str ,_a : str ):
'''simple docstring'''
super().__init__(_a )
A_ : List[str] = config
A_ : Optional[Any] = PoolFormerEncoder(_a )
# Initialize weights and apply final processing
self.post_init()
def _a ( self : Optional[int] ):
'''simple docstring'''
return self.embeddings.patch_embeddings
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_CHECKPOINT_FOR_DOC ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,modality="""vision""" ,expected_output=_EXPECTED_OUTPUT_SHAPE ,)
def _a ( self : List[str] ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
A_ : Any = (
output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
)
A_ : Union[str, Any] = return_dict if return_dict is not None else self.config.use_return_dict
if pixel_values is None:
raise ValueError("""You have to specify pixel_values""" )
A_ : List[Any] = self.encoder(
_a ,output_hidden_states=_a ,return_dict=_a ,)
A_ : int = encoder_outputs[0]
if not return_dict:
return (sequence_output, None) + encoder_outputs[1:]
return BaseModelOutputWithNoAttention(
last_hidden_state=_a ,hidden_states=encoder_outputs.hidden_states ,)
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
def __init__( self : Optional[int] ,_a : str ):
'''simple docstring'''
super().__init__()
A_ : Optional[int] = nn.Linear(config.hidden_size ,config.hidden_size )
def _a ( self : str ,_a : Any ):
'''simple docstring'''
A_ : Optional[Any] = self.dense(_a )
return output
@add_start_docstrings(
"""
PoolFormer Model transformer with an image classification head on top
""" , __SCREAMING_SNAKE_CASE , )
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : List[str] ):
'''simple docstring'''
super().__init__(_a )
A_ : List[str] = config.num_labels
A_ : str = PoolFormerModel(_a )
# Final norm
A_ : Union[str, Any] = PoolFormerGroupNorm(config.hidden_sizes[-1] )
# Classifier head
A_ : Optional[Any] = (
nn.Linear(config.hidden_sizes[-1] ,config.num_labels ) if config.num_labels > 0 else nn.Identity()
)
# Initialize weights and apply final processing
self.post_init()
@add_start_docstrings_to_model_forward(_a )
@add_code_sample_docstrings(
checkpoint=_IMAGE_CLASS_CHECKPOINT ,output_type=_a ,config_class=_CONFIG_FOR_DOC ,expected_output=_IMAGE_CLASS_EXPECTED_OUTPUT ,)
def _a ( self : str ,_a : Optional[torch.FloatTensor] = None ,_a : Optional[torch.LongTensor] = None ,_a : Optional[bool] = None ,_a : Optional[bool] = None ,):
'''simple docstring'''
A_ : int = return_dict if return_dict is not None else self.config.use_return_dict
A_ : Tuple = self.poolformer(
_a ,output_hidden_states=_a ,return_dict=_a ,)
A_ : Optional[Any] = outputs[0]
A_ : Optional[int] = self.classifier(self.norm(_a ).mean([-2, -1] ) )
A_ : Tuple = None
if labels is not None:
if self.config.problem_type is None:
if self.num_labels == 1:
A_ : Optional[Any] = """regression"""
elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
A_ : List[Any] = """single_label_classification"""
else:
A_ : int = """multi_label_classification"""
if self.config.problem_type == "regression":
A_ : Optional[Any] = MSELoss()
if self.num_labels == 1:
A_ : Optional[int] = loss_fct(logits.squeeze() ,labels.squeeze() )
else:
A_ : List[Any] = loss_fct(_a ,_a )
elif self.config.problem_type == "single_label_classification":
A_ : List[str] = CrossEntropyLoss()
A_ : List[str] = loss_fct(logits.view(-1 ,self.num_labels ) ,labels.view(-1 ) )
elif self.config.problem_type == "multi_label_classification":
A_ : Dict = BCEWithLogitsLoss()
A_ : List[str] = loss_fct(_a ,_a )
if not return_dict:
A_ : List[Any] = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return ImageClassifierOutputWithNoAttention(loss=_a ,logits=_a ,hidden_states=outputs.hidden_states )
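# Illustrative note (not part of the original file): the classification head above
# global-average-pools the final feature map over its spatial axes before the linear
# classifier, i.e. for a hypothetical (batch, channels, H, W) tensor:
#   pooled = features.mean([-2, -1])   # (batch, channels), as in `self.norm(...).mean([-2, -1])`
#   logits = classifier(pooled)        # (batch, num_labels)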
| 27 |
'''simple docstring'''
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Mapping, Optional, Union
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
if TYPE_CHECKING:
from ... import FeatureExtractionMixin, PreTrainedTokenizerBase, TensorType
__magic_name__ = logging.get_logger(__name__)
__magic_name__ = {
'microsoft/deberta-v2-xlarge': 'https://huggingface.co/microsoft/deberta-v2-xlarge/resolve/main/config.json',
'microsoft/deberta-v2-xxlarge': 'https://huggingface.co/microsoft/deberta-v2-xxlarge/resolve/main/config.json',
'microsoft/deberta-v2-xlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xlarge-mnli/resolve/main/config.json'
),
'microsoft/deberta-v2-xxlarge-mnli': (
'https://huggingface.co/microsoft/deberta-v2-xxlarge-mnli/resolve/main/config.json'
),
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = """deberta-v2"""
def __init__( self : Optional[Any] ,_a : Union[str, Any]=128100 ,_a : Optional[int]=1536 ,_a : Dict=24 ,_a : int=24 ,_a : Tuple=6144 ,_a : Union[str, Any]="gelu" ,_a : List[Any]=0.1 ,_a : Dict=0.1 ,_a : int=512 ,_a : int=0 ,_a : int=0.02 ,_a : int=1e-7 ,_a : List[str]=False ,_a : Union[str, Any]=-1 ,_a : List[Any]=0 ,_a : Optional[Any]=True ,_a : Tuple=None ,_a : Any=0 ,_a : int="gelu" ,**_a : Any ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Union[str, Any] = hidden_size
A_ : Dict = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : List[Any] = intermediate_size
A_ : List[Any] = hidden_act
A_ : Optional[int] = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : int = max_position_embeddings
A_ : Any = type_vocab_size
A_ : List[Any] = initializer_range
A_ : int = relative_attention
A_ : Tuple = max_relative_positions
A_ : int = pad_token_id
A_ : Tuple = position_biased_input
# Backwards compatibility
if type(_a ) == str:
A_ : str = [x.strip() for x in pos_att_type.lower().split("""|""" )]
A_ : Any = pos_att_type
A_ : Optional[int] = vocab_size
A_ : Tuple = layer_norm_eps
A_ : Any = kwargs.get("""pooler_hidden_size""" ,_a )
A_ : Union[str, Any] = pooler_dropout
A_ : List[Any] = pooler_hidden_act
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
@property
def _a ( self : Any ):
'''simple docstring'''
if self.task == "multiple-choice":
A_ : Any = {0: """batch""", 1: """choice""", 2: """sequence"""}
else:
A_ : Any = {0: """batch""", 1: """sequence"""}
if self._config.type_vocab_size > 0:
return OrderedDict(
[("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis), ("""token_type_ids""", dynamic_axis)] )
else:
return OrderedDict([("""input_ids""", dynamic_axis), ("""attention_mask""", dynamic_axis)] )
@property
def _a ( self : Optional[int] ):
'''simple docstring'''
return 12
def _a ( self : int ,_a : Union["PreTrainedTokenizerBase", "FeatureExtractionMixin"] ,_a : int = -1 ,_a : int = -1 ,_a : int = -1 ,_a : bool = False ,_a : Optional["TensorType"] = None ,_a : int = 3 ,_a : int = 40 ,_a : int = 40 ,_a : "PreTrainedTokenizerBase" = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(preprocessor=_a ,framework=_a )
if self._config.type_vocab_size == 0 and "token_type_ids" in dummy_inputs:
del dummy_inputs["token_type_ids"]
return dummy_inputs
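# For reference (illustrative): with type_vocab_size > 0 the `inputs` property above
# maps each of input_ids / attention_mask / token_type_ids to {0: "batch", 1: "sequence"},
# keeping both axes dynamic in the exported ONNX graph.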
| 27 | 1 |
'''simple docstring'''
import os
import numpy
import onnx
def _is_equal_tensor_proto(a, b):
    # Compare two initializers while ignoring their names.
    name_a = a.name
    name_b = b.name
    a.name = ""
    b.name = ""
    res = a == b
    a.name = name_a
    b.name = name_b
    return res
def _node_replace_input_with(node_proto, name, new_name):
    for i, input_name in enumerate(node_proto.input):
        if input_name == name:
            node_proto.input.insert(i, new_name)
            node_proto.input.pop(i + 1)
    if node_proto.op_type == "If":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
        _graph_replace_input_with(node_proto.attribute[1].g, name, new_name)
    if node_proto.op_type == "Loop":
        _graph_replace_input_with(node_proto.attribute[0].g, name, new_name)
def _graph_replace_input_with(graph_proto, name, new_name):
    for n in graph_proto.node:
        _node_replace_input_with(n, name, new_name)
def _remove_dup_initializers_from_model(model, model_without_ext, ind_to_replace):
    inits_with_data = list(model.graph.initializer)
    inits = list(model_without_ext.graph.initializer)
    for i, ref_i in ind_to_replace:
        assert inits_with_data[i].name == inits[i].name
        assert inits_with_data[ref_i].name == inits[ref_i].name
        assert i > ref_i
        name_i = inits[i].name
        name_ref = inits[ref_i].name
        # Drop the duplicate initializer and point every consumer at the kept copy.
        model_without_ext.graph.initializer.remove(inits[i])
        # for n in model.graph.node:
        _graph_replace_input_with(model_without_ext.graph, name_i, name_ref)
def remove_dup_initializers(onnx_file_path):
    model_file_folder = os.path.dirname(onnx_file_path)
    model_file_name = os.path.basename(onnx_file_path)
    model = onnx.load(os.path.join(model_file_folder, model_file_name))
    inits = list(model.graph.initializer)
    dup_set = set()
    dup_map = {}
    ind_to_replace = []
    total_reduced_size = 0
    for i in range(len(inits)):
        if i in dup_set:
            continue
        for j in range(i + 1, len(inits)):
            if j in dup_set:
                continue
            if _is_equal_tensor_proto(inits[i], inits[j]):
                dup_set.add(i)
                dup_set.add(j)
                # ONNX TensorProto data types: 1 = FLOAT and 6 = INT32 (4 bytes each),
                # 7 = INT64 and 11 = DOUBLE (8 bytes each).
                dtype = inits[j].data_type
                mem_size = numpy.prod(inits[j].dims)
                if dtype == 1:
                    mem_size *= 4
                elif dtype == 6:
                    mem_size *= 4
                elif dtype == 7 or dtype == 11:
                    mem_size *= 8
                else:
                    print("unexpected data type: ", dtype)
                total_reduced_size += mem_size
                name_i = inits[i].name
                name_j = inits[j].name
                if name_i in dup_map:
                    dup_map[name_i].append(name_j)
                else:
                    dup_map[name_i] = [name_j]
                ind_to_replace.append((j, i))
    print("total reduced size: ", total_reduced_size / 1024 / 1024 / 1024, "GB")
    ind_to_replace = sorted(ind_to_replace)
    _remove_dup_initializers_from_model(model, model, ind_to_replace)
    optimized_model_file_name = "optimized_" + model_file_name
    new_model = os.path.join(model_file_folder, optimized_model_file_name)
    onnx.save(model, new_model)
    return new_model
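# Illustrative usage (not part of the original script; the file name is hypothetical):
#   optimized_path = remove_dup_initializers("encoder.onnx")
#   print("deduplicated model written to", optimized_path)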
| 27 |
'''simple docstring'''
import sys
import webbrowser
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
if __name__ == "__main__":
print('Googling.....')
url = 'https://www.google.com/search?q=' + ' '.join(sys.argv[1:])
res = requests.get(url, headers={'UserAgent': UserAgent().random})
# res.raise_for_status()
with open('project1a.html', 'wb') as out_file: # only for knowing the class
for data in res.iter_content(10_000):
out_file.write(data)
soup = BeautifulSoup(res.text, 'html.parser')
links = list(soup.select('.eZt8xd'))[:5]
print(len(links))
for link in links:
if link.text == "Maps":
webbrowser.open(link.get('href'))
else:
webbrowser.open(f"""https://google.com{link.get('href')}""")
| 27 | 1 |
'''simple docstring'''
from collections.abc import Callable
def bisection(function: Callable[[float], float], a: float, b: float) -> float:
    start: float = a
    end: float = b
    if function(a) == 0:  # one of the a or b is a root for the function
        return a
    elif function(b) == 0:
        return b
    elif (
        function(a) * function(b) > 0
    ):  # if none of these are root and they are both positive or negative,
        # then this algorithm can't find the root
        raise ValueError("could not find root in given interval.")
    else:
        mid: float = start + (end - start) / 2.0
        while abs(start - mid) > 10**-7:  # until precisely equals to 10^-7
            if function(mid) == 0:
                return mid
            elif function(mid) * function(start) < 0:
                end = mid
            else:
                start = mid
            mid = start + (end - start) / 2.0
        return mid
def f(x: float) -> float:
    return x**3 - 2 * x - 5
if __name__ == "__main__":
print(bisection(f, 1, 1_000))
import doctest
doctest.testmod()
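    # Extra illustrative check (not in the original file): bisection requires a sign
    # change on [a, b]; sqrt(2) is the root of x**2 - 2 on [0, 2].
    assert abs(bisection(lambda x: x**2 - 2, 0, 2) - 2**0.5) < 10**-6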
| 27 |
'''simple docstring'''
from ... import PretrainedConfig
NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'sijunhe/nezha-cn-base': 'https://huggingface.co/sijunhe/nezha-cn-base/resolve/main/config.json',
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = NEZHA_PRETRAINED_CONFIG_ARCHIVE_MAP
a_ = """nezha"""
def __init__( self : int ,_a : Union[str, Any]=21128 ,_a : int=768 ,_a : Any=12 ,_a : List[str]=12 ,_a : str=3072 ,_a : int="gelu" ,_a : int=0.1 ,_a : str=0.1 ,_a : Tuple=512 ,_a : List[Any]=64 ,_a : Dict=2 ,_a : List[Any]=0.02 ,_a : Optional[Any]=1e-12 ,_a : List[Any]=0.1 ,_a : Union[str, Any]=0 ,_a : Any=2 ,_a : Union[str, Any]=3 ,_a : int=True ,**_a : int ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Any = num_hidden_layers
A_ : List[Any] = num_attention_heads
A_ : Tuple = hidden_act
A_ : List[Any] = intermediate_size
A_ : List[str] = hidden_dropout_prob
A_ : Tuple = attention_probs_dropout_prob
A_ : Dict = max_position_embeddings
A_ : Optional[Any] = max_relative_position
A_ : List[Any] = type_vocab_size
A_ : int = initializer_range
A_ : Tuple = layer_norm_eps
A_ : Dict = classifier_dropout
A_ : int = use_cache
| 27 | 1 |
'''simple docstring'''
import sys
from .dependency_versions_table import deps
from .utils.versions import require_version, require_version_core
# define which module versions we always want to check at run time
# (usually the ones defined in `install_requires` in setup.py)
#
# order specific notes:
# - tqdm must be checked before tokenizers
pkgs_to_check_at_runtime = 'python tqdm regex requests packaging filelock numpy tokenizers'.split()
if sys.version_info < (3, 7):
pkgs_to_check_at_runtime.append('dataclasses')
if sys.version_info < (3, 8):
pkgs_to_check_at_runtime.append('importlib_metadata')
for pkg in pkgs_to_check_at_runtime:
if pkg in deps:
if pkg == "tokenizers":
# must be loaded here, or else tqdm check may fail
from .utils import is_tokenizers_available
if not is_tokenizers_available():
continue # not required, check version only if installed
require_version_core(deps[pkg])
else:
raise ValueError(f"""can't find {pkg} in {deps.keys()}, check dependency_versions_table.py""")
def dep_version_check(pkg, hint=None):
    require_version(deps[pkg], hint)
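# Example (illustrative): dep_version_check("tokenizers") raises if the installed
# tokenizers version does not satisfy the pin in dependency_versions_table.deps.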
| 27 |
'''simple docstring'''
from __future__ import annotations
def depth_first_search(graph: dict, start: str) -> set[str]:
    explored, stack = set(start), [start]
    while stack:
        v = stack.pop()
        explored.add(v)
        # Differences from BFS:
        # 1) pop last element instead of first one
        # 2) add adjacent elements to stack without exploring them
        for adj in reversed(graph[v]):
            if adj not in explored:
                stack.append(adj)
    return explored
G = {
'A': ['B', 'C', 'D'],
'B': ['A', 'D', 'E'],
'C': ['A', 'F'],
'D': ['B', 'D'],
'E': ['B', 'F'],
'F': ['C', 'E', 'G'],
'G': ['F'],
}
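# For contrast (illustrative addition, not in the original file): the same traversal
# written recursively; neighbours are descended into immediately instead of stacked,
# so the visit order differs from the iterative version above.
def depth_first_search_recursive(graph: dict, v: str, explored: set | None = None) -> set:
    if explored is None:
        explored = set()
    explored.add(v)
    for adj in graph[v]:
        if adj not in explored:
            depth_first_search_recursive(graph, adj, explored)
    return explored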
if __name__ == "__main__":
import doctest
doctest.testmod()
print(depth_first_search(G, 'A'))
| 27 | 1 |
'''simple docstring'''
def solution(n: int = 1000) -> int:
    product = -1
    candidate = 0
    for a in range(1, n // 3):
        # Solving the two equations a**2+b**2=c**2 and a+b+c=N eliminating c:
        # substitute c = n - a - b into a**2 + b**2 = c**2 and solve for b.
        b = (n * n - 2 * a * n) // (2 * n - 2 * a)
        c = n - a - b
        if c * c == (a * a + b * b):
            candidate = a * b * c
            if candidate >= product:
                product = candidate
    return product
if __name__ == "__main__":
print(f"""{solution() = }""")
| 27 |
'''simple docstring'''
import argparse
import json
from pathlib import Path
import requests
import torch
from huggingface_hub import hf_hub_download
from PIL import Image
from transformers import (
MobileViTConfig,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTImageProcessor,
)
from transformers.utils import logging
logging.set_verbosity_info()
__magic_name__ = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Dict):
A_ : List[str] = MobileViTConfig()
# size of the architecture
if "mobilevit_s" in mobilevit_name:
A_ : Union[str, Any] = [144, 192, 240]
A_ : int = [16, 32, 64, 96, 128, 160, 640]
elif "mobilevit_xs" in mobilevit_name:
A_ : List[str] = [96, 120, 144]
A_ : Any = [16, 32, 48, 64, 80, 96, 384]
elif "mobilevit_xxs" in mobilevit_name:
A_ : Any = [64, 80, 96]
A_ : List[str] = [16, 16, 24, 48, 64, 80, 320]
A_ : Any = 0.05
A_ : List[Any] = 2.0
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : int = 512
A_ : Optional[int] = 16
A_ : List[Any] = 21
A_ : List[str] = """pascal-voc-id2label.json"""
else:
A_ : str = 1000
A_ : Any = """imagenet-1k-id2label.json"""
A_ : Any = """huggingface/label-files"""
A_ : List[str] = json.load(open(hf_hub_download(lowerCamelCase , lowerCamelCase , repo_type="""dataset""") , """r"""))
A_ : str = {int(lowerCamelCase): v for k, v in idalabel.items()}
A_ : Any = idalabel
A_ : List[str] = {v: k for k, v in idalabel.items()}
return config
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : int=False):
for i in range(1 , 6):
if F'layer_{i}.' in name:
A_ : Tuple = name.replace(F'layer_{i}.' , F'encoder.layer.{i - 1}.')
if "conv_1." in name:
A_ : Union[str, Any] = name.replace("""conv_1.""" , """conv_stem.""")
if ".block." in name:
A_ : Optional[Any] = name.replace(""".block.""" , """.""")
if "exp_1x1" in name:
A_ : Union[str, Any] = name.replace("""exp_1x1""" , """expand_1x1""")
if "red_1x1" in name:
A_ : int = name.replace("""red_1x1""" , """reduce_1x1""")
if ".local_rep.conv_3x3." in name:
A_ : List[str] = name.replace(""".local_rep.conv_3x3.""" , """.conv_kxk.""")
if ".local_rep.conv_1x1." in name:
A_ : Optional[int] = name.replace(""".local_rep.conv_1x1.""" , """.conv_1x1.""")
if ".norm." in name:
A_ : Tuple = name.replace(""".norm.""" , """.normalization.""")
if ".conv." in name:
A_ : List[Any] = name.replace(""".conv.""" , """.convolution.""")
if ".conv_proj." in name:
A_ : str = name.replace(""".conv_proj.""" , """.conv_projection.""")
for i in range(0 , 2):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Tuple = name.replace(F'.{i}.{j}.' , F'.{i}.layer.{j}.')
for i in range(2 , 6):
for j in range(0 , 4):
if F'.{i}.{j}.' in name:
A_ : Dict = name.replace(F'.{i}.{j}.' , F'.{i}.')
if "expand_1x1" in name:
A_ : Union[str, Any] = name.replace("""expand_1x1""" , """downsampling_layer.expand_1x1""")
if "conv_3x3" in name:
A_ : str = name.replace("""conv_3x3""" , """downsampling_layer.conv_3x3""")
if "reduce_1x1" in name:
A_ : Union[str, Any] = name.replace("""reduce_1x1""" , """downsampling_layer.reduce_1x1""")
for i in range(2 , 5):
if F'.global_rep.{i}.weight' in name:
A_ : List[Any] = name.replace(F'.global_rep.{i}.weight' , """.layernorm.weight""")
if F'.global_rep.{i}.bias' in name:
A_ : Optional[int] = name.replace(F'.global_rep.{i}.bias' , """.layernorm.bias""")
if ".global_rep." in name:
A_ : Optional[Any] = name.replace(""".global_rep.""" , """.transformer.""")
if ".pre_norm_mha.0." in name:
A_ : int = name.replace(""".pre_norm_mha.0.""" , """.layernorm_before.""")
if ".pre_norm_mha.1.out_proj." in name:
A_ : Dict = name.replace(""".pre_norm_mha.1.out_proj.""" , """.attention.output.dense.""")
if ".pre_norm_ffn.0." in name:
A_ : Dict = name.replace(""".pre_norm_ffn.0.""" , """.layernorm_after.""")
if ".pre_norm_ffn.1." in name:
A_ : Any = name.replace(""".pre_norm_ffn.1.""" , """.intermediate.dense.""")
if ".pre_norm_ffn.4." in name:
A_ : Union[str, Any] = name.replace(""".pre_norm_ffn.4.""" , """.output.dense.""")
if ".transformer." in name:
A_ : Any = name.replace(""".transformer.""" , """.transformer.layer.""")
if ".aspp_layer." in name:
A_ : int = name.replace(""".aspp_layer.""" , """.""")
if ".aspp_pool." in name:
A_ : Tuple = name.replace(""".aspp_pool.""" , """.""")
if "seg_head." in name:
A_ : Optional[int] = name.replace("""seg_head.""" , """segmentation_head.""")
if "segmentation_head.classifier.classifier." in name:
A_ : List[str] = name.replace("""segmentation_head.classifier.classifier.""" , """segmentation_head.classifier.""")
if "classifier.fc." in name:
A_ : str = name.replace("""classifier.fc.""" , """classifier.""")
elif (not base_model) and ("segmentation_head." not in name):
A_ : str = """mobilevit.""" + name
return name
def lowerCamelCase ( lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[Any] , lowerCamelCase : Optional[int]=False):
if base_model:
A_ : Dict = """"""
else:
A_ : Any = """mobilevit."""
for key in orig_state_dict.copy().keys():
A_ : List[Any] = orig_state_dict.pop(lowerCamelCase)
if key[:8] == "encoder.":
A_ : int = key[8:]
if "qkv" in key:
A_ : Any = key.split(""".""")
A_ : str = int(key_split[0][6:]) - 1
A_ : int = int(key_split[3])
A_ : Optional[Any] = model.get_submodule(F'{model_prefix}encoder.layer.{layer_num}')
A_ : Tuple = layer.transformer.layer[transformer_num].attention.attention.all_head_size
A_ : Optional[Any] = (
F'{model_prefix}encoder.layer.{layer_num}.transformer.layer.{transformer_num}.attention.attention.'
)
if "weight" in key:
A_ : Dict = val[:dim, :]
A_ : Optional[int] = val[dim : dim * 2, :]
A_ : List[Any] = val[-dim:, :]
else:
A_ : Optional[Any] = val[:dim]
A_ : List[Any] = val[dim : dim * 2]
A_ : Any = val[-dim:]
else:
A_ : List[str] = val
return orig_state_dict
def lowerCamelCase ( ):
A_ : List[Any] = """http://images.cocodataset.org/val2017/000000039769.jpg"""
A_ : Dict = Image.open(requests.get(lowerCamelCase , stream=lowerCamelCase).raw)
return im
@torch.no_grad()
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Any , lowerCamelCase : Optional[int] , lowerCamelCase : int=False):
A_ : Optional[Any] = get_mobilevit_config(lowerCamelCase)
# load original state_dict
A_ : List[Any] = torch.load(lowerCamelCase , map_location="""cpu""")
# load 🤗 model
if mobilevit_name.startswith("""deeplabv3_"""):
A_ : List[str] = MobileViTForSemanticSegmentation(lowerCamelCase).eval()
else:
A_ : str = MobileViTForImageClassification(lowerCamelCase).eval()
A_ : str = convert_state_dict(lowerCamelCase , lowerCamelCase)
model.load_state_dict(lowerCamelCase)
# Check outputs on an image, prepared by MobileViTImageProcessor
A_ : Optional[Any] = MobileViTImageProcessor(crop_size=config.image_size , size=config.image_size + 32)
A_ : Any = image_processor(images=prepare_img() , return_tensors="""pt""")
A_ : List[Any] = model(**lowerCamelCase)
A_ : Dict = outputs.logits
if mobilevit_name.startswith("""deeplabv3_"""):
assert logits.shape == (1, 21, 32, 32)
if mobilevit_name == "deeplabv3_mobilevit_s":
A_ : int = torch.tensor(
[
[[6.2065, 6.1292, 6.2070], [6.1079, 6.1254, 6.1747], [6.0042, 6.1071, 6.1034]],
[[-6.9253, -6.8653, -7.0398], [-7.3218, -7.3983, -7.3670], [-7.1961, -7.2482, -7.1569]],
[[-4.4723, -4.4348, -4.3769], [-5.3629, -5.4632, -5.4598], [-5.1587, -5.3402, -5.5059]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xs":
A_ : Tuple = torch.tensor(
[
[[5.4449, 5.5733, 5.6314], [5.1815, 5.3930, 5.5963], [5.1656, 5.4333, 5.4853]],
[[-9.4423, -9.7766, -9.6714], [-9.1581, -9.5720, -9.5519], [-9.1006, -9.6458, -9.5703]],
[[-7.7721, -7.3716, -7.1583], [-8.4599, -8.0624, -7.7944], [-8.4172, -7.8366, -7.5025]],
])
elif mobilevit_name == "deeplabv3_mobilevit_xxs":
A_ : Tuple = torch.tensor(
[
[[6.9811, 6.9743, 7.3123], [7.1777, 7.1931, 7.3938], [7.5633, 7.8050, 7.8901]],
[[-10.5536, -10.2332, -10.2924], [-10.2336, -9.8624, -9.5964], [-10.8840, -10.8158, -10.6659]],
[[-3.4938, -3.0631, -2.8620], [-3.4205, -2.8135, -2.6875], [-3.4179, -2.7945, -2.8750]],
])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3, :3, :3] , lowerCamelCase , atol=1E-4)
else:
assert logits.shape == (1, 1000)
if mobilevit_name == "mobilevit_s":
A_ : Tuple = torch.tensor([-0.9866, 0.2392, -1.1241])
elif mobilevit_name == "mobilevit_xs":
A_ : Any = torch.tensor([-2.4761, -0.9399, -1.9587])
elif mobilevit_name == "mobilevit_xxs":
A_ : Union[str, Any] = torch.tensor([-1.9364, -1.2327, -0.4653])
else:
raise ValueError(F'Unknown mobilevit_name: {mobilevit_name}')
assert torch.allclose(logits[0, :3] , lowerCamelCase , atol=1E-4)
Path(lowerCamelCase).mkdir(exist_ok=lowerCamelCase)
print(F'Saving model {mobilevit_name} to {pytorch_dump_folder_path}')
model.save_pretrained(lowerCamelCase)
print(F'Saving image processor to {pytorch_dump_folder_path}')
image_processor.save_pretrained(lowerCamelCase)
if push_to_hub:
A_ : str = {
"""mobilevit_s""": """mobilevit-small""",
"""mobilevit_xs""": """mobilevit-x-small""",
"""mobilevit_xxs""": """mobilevit-xx-small""",
"""deeplabv3_mobilevit_s""": """deeplabv3-mobilevit-small""",
"""deeplabv3_mobilevit_xs""": """deeplabv3-mobilevit-x-small""",
"""deeplabv3_mobilevit_xxs""": """deeplabv3-mobilevit-xx-small""",
}
print("""Pushing to the hub...""")
A_ : Union[str, Any] = model_mapping[mobilevit_name]
image_processor.push_to_hub(lowerCamelCase , organization="""apple""")
model.push_to_hub(lowerCamelCase , organization="""apple""")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--mobilevit_name',
default='mobilevit_s',
type=str,
help=(
'Name of the MobileViT model you\'d like to convert. Should be one of \'mobilevit_s\', \'mobilevit_xs\','
' \'mobilevit_xxs\', \'deeplabv3_mobilevit_s\', \'deeplabv3_mobilevit_xs\', \'deeplabv3_mobilevit_xxs\'.'
),
)
parser.add_argument(
'--checkpoint_path', required=True, type=str, help='Path to the original state dict (.pt file).'
)
parser.add_argument(
'--pytorch_dump_folder_path', required=True, type=str, help='Path to the output PyTorch model directory.'
)
parser.add_argument(
'--push_to_hub', action='store_true', help='Whether or not to push the converted model to the 🤗 hub.'
)
args = parser.parse_args()
convert_movilevit_checkpoint(
args.mobilevit_name, args.checkpoint_path, args.pytorch_dump_folder_path, args.push_to_hub
)
| 27 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from ...utils import logging
from .tokenization_squeezebert import SqueezeBertTokenizer
__magic_name__ = logging.get_logger(__name__)
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/vocab.txt'
),
'squeezebert/squeezebert-mnli': 'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/vocab.txt',
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'squeezebert/squeezebert-uncased': (
'https://huggingface.co/squeezebert/squeezebert-uncased/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli': (
'https://huggingface.co/squeezebert/squeezebert-mnli/resolve/main/tokenizer.json'
),
'squeezebert/squeezebert-mnli-headless': (
'https://huggingface.co/squeezebert/squeezebert-mnli-headless/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'squeezebert/squeezebert-uncased': 512,
'squeezebert/squeezebert-mnli': 512,
'squeezebert/squeezebert-mnli-headless': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'squeezebert/squeezebert-uncased': {'do_lower_case': True},
'squeezebert/squeezebert-mnli': {'do_lower_case': True},
'squeezebert/squeezebert-mnli-headless': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = SqueezeBertTokenizer
def __init__( self : Optional[int] ,_a : Optional[Any]=None ,_a : Optional[int]=None ,_a : str=True ,_a : Any="[UNK]" ,_a : str="[SEP]" ,_a : Optional[Any]="[PAD]" ,_a : List[str]="[CLS]" ,_a : List[str]="[MASK]" ,_a : Optional[Any]=True ,_a : Optional[Any]=None ,**_a : Any ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Union[str, Any] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : List[Any] = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : Union[str, Any] = do_lower_case
A_ : List[str] = strip_accents
A_ : Dict = tokenize_chinese_chars
A_ : List[str] = normalizer_class(**_a )
A_ : Any = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory : str ,filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
| 27 |
'''simple docstring'''
from typing import Dict, List, Optional, Union
import numpy as np
from ...image_processing_utils import BaseImageProcessor, BatchFeature, get_size_dict
from ...image_transforms import (
center_crop,
convert_to_rgb,
get_resize_output_image_size,
normalize,
rescale,
resize,
to_channel_dimension_format,
)
from ...image_utils import (
OPENAI_CLIP_MEAN,
OPENAI_CLIP_STD,
ChannelDimension,
ImageInput,
PILImageResampling,
make_list_of_images,
to_numpy_array,
valid_images,
)
from ...utils import TensorType, is_vision_available, logging
__magic_name__ = logging.get_logger(__name__)
if is_vision_available():
import PIL
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = ["""pixel_values"""]
def __init__( self : Optional[Any] ,_a : bool = True ,_a : Dict[str, int] = None ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : bool = True ,_a : Dict[str, int] = None ,_a : bool = True ,_a : Union[int, float] = 1 / 255 ,_a : bool = True ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = True ,**_a : Dict ,):
'''simple docstring'''
super().__init__(**_a )
A_ : Tuple = size if size is not None else {"""shortest_edge""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
A_ : Tuple = crop_size if crop_size is not None else {"""height""": 224, """width""": 224}
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a ,param_name="""crop_size""" )
A_ : Any = do_resize
A_ : List[str] = size
A_ : Union[str, Any] = resample
A_ : Dict = do_center_crop
A_ : List[str] = crop_size
A_ : Any = do_rescale
A_ : Union[str, Any] = rescale_factor
A_ : Any = do_normalize
A_ : List[str] = image_mean if image_mean is not None else OPENAI_CLIP_MEAN
A_ : List[Any] = image_std if image_std is not None else OPENAI_CLIP_STD
A_ : Tuple = do_convert_rgb
def _a ( self : Optional[int] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : PILImageResampling = PILImageResampling.BICUBIC ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[Any] ,):
'''simple docstring'''
A_ : Optional[Any] = get_size_dict(_a ,default_to_square=_a )
if "shortest_edge" not in size:
raise ValueError(f'The `size` parameter must contain the key `shortest_edge`. Got {size.keys()}' )
A_ : Tuple = get_resize_output_image_size(_a ,size=size["""shortest_edge"""] ,default_to_square=_a )
return resize(_a ,size=_a ,resample=_a ,data_format=_a ,**_a )
def _a ( self : List[Any] ,_a : np.ndarray ,_a : Dict[str, int] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Optional[int] ,):
'''simple docstring'''
A_ : Optional[int] = get_size_dict(_a )
if "height" not in size or "width" not in size:
raise ValueError(f'The `size` parameter must contain the keys (height, width). Got {size.keys()}' )
return center_crop(_a ,size=(size["""height"""], size["""width"""]) ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[int, float] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : Any ,):
'''simple docstring'''
return rescale(_a ,scale=_a ,data_format=_a ,**_a )
def _a ( self : Any ,_a : np.ndarray ,_a : Union[float, List[float]] ,_a : Union[float, List[float]] ,_a : Optional[Union[str, ChannelDimension]] = None ,**_a : List[str] ,):
'''simple docstring'''
return normalize(_a ,mean=_a ,std=_a ,data_format=_a ,**_a )
def _a ( self : Optional[Any] ,_a : ImageInput ,_a : bool = None ,_a : Dict[str, int] = None ,_a : PILImageResampling = None ,_a : bool = None ,_a : int = None ,_a : bool = None ,_a : float = None ,_a : bool = None ,_a : Optional[Union[float, List[float]]] = None ,_a : Optional[Union[float, List[float]]] = None ,_a : bool = None ,_a : Optional[Union[str, TensorType]] = None ,_a : Optional[ChannelDimension] = ChannelDimension.FIRST ,**_a : int ,):
'''simple docstring'''
A_ : Union[str, Any] = do_resize if do_resize is not None else self.do_resize
A_ : Tuple = size if size is not None else self.size
A_ : Optional[int] = get_size_dict(_a ,param_name="""size""" ,default_to_square=_a )
A_ : List[str] = resample if resample is not None else self.resample
A_ : int = do_center_crop if do_center_crop is not None else self.do_center_crop
A_ : Any = crop_size if crop_size is not None else self.crop_size
A_ : int = get_size_dict(_a ,param_name="""crop_size""" ,default_to_square=_a )
A_ : List[Any] = do_rescale if do_rescale is not None else self.do_rescale
A_ : int = rescale_factor if rescale_factor is not None else self.rescale_factor
A_ : Any = do_normalize if do_normalize is not None else self.do_normalize
A_ : int = image_mean if image_mean is not None else self.image_mean
A_ : int = image_std if image_std is not None else self.image_std
A_ : List[str] = do_convert_rgb if do_convert_rgb is not None else self.do_convert_rgb
A_ : int = make_list_of_images(_a )
if not valid_images(_a ):
raise ValueError(
"""Invalid image type. Must be of type PIL.Image.Image, numpy.ndarray, """
"""torch.Tensor, tf.Tensor or jax.ndarray.""" )
if do_resize and size is None:
raise ValueError("""Size must be specified if do_resize is True.""" )
if do_center_crop and crop_size is None:
raise ValueError("""Crop size must be specified if do_center_crop is True.""" )
if do_rescale and rescale_factor is None:
raise ValueError("""Rescale factor must be specified if do_rescale is True.""" )
if do_normalize and (image_mean is None or image_std is None):
raise ValueError("""Image mean and std must be specified if do_normalize is True.""" )
# PIL RGBA images are converted to RGB
if do_convert_rgb:
A_ : Optional[int] = [convert_to_rgb(_a ) for image in images]
# All transformations expect numpy arrays.
A_ : Dict = [to_numpy_array(_a ) for image in images]
if do_resize:
A_ : int = [self.resize(image=_a ,size=_a ,resample=_a ) for image in images]
if do_center_crop:
A_ : Tuple = [self.center_crop(image=_a ,size=_a ) for image in images]
if do_rescale:
A_ : List[str] = [self.rescale(image=_a ,scale=_a ) for image in images]
if do_normalize:
A_ : Any = [self.normalize(image=_a ,mean=_a ,std=_a ) for image in images]
A_ : List[str] = [to_channel_dimension_format(_a ,_a ) for image in images]
A_ : List[str] = {"""pixel_values""": images}
return BatchFeature(data=_a ,tensor_type=_a )
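# For reference (illustrative summary, not part of the original file): `preprocess`
# applies the steps above in this order: RGB conversion -> resize (shortest edge) ->
# center crop -> rescale (1/255) -> normalize (OpenAI CLIP mean/std) -> channel-first
# layout, returned under the "pixel_values" key of a BatchFeature.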
| 27 | 1 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
'configuration_mobilevit': ['MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'MobileViTConfig', 'MobileViTOnnxConfig'],
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_mobilevit'] = ['MobileViTFeatureExtractor']
    _import_structure['image_processing_mobilevit'] = ['MobileViTImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_mobilevit'] = [
'MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'MobileViTForImageClassification',
'MobileViTForSemanticSegmentation',
'MobileViTModel',
'MobileViTPreTrainedModel',
]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_mobilevit'] = [
'TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST',
'TFMobileViTForImageClassification',
'TFMobileViTForSemanticSegmentation',
'TFMobileViTModel',
'TFMobileViTPreTrainedModel',
]
if TYPE_CHECKING:
from .configuration_mobilevit import MOBILEVIT_PRETRAINED_CONFIG_ARCHIVE_MAP, MobileViTConfig, MobileViTOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_mobilevit import MobileViTFeatureExtractor
from .image_processing_mobilevit import MobileViTImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_mobilevit import (
MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
MobileViTForImageClassification,
MobileViTForSemanticSegmentation,
MobileViTModel,
MobileViTPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_mobilevit import (
TF_MOBILEVIT_PRETRAINED_MODEL_ARCHIVE_LIST,
TFMobileViTForImageClassification,
TFMobileViTForSemanticSegmentation,
TFMobileViTModel,
TFMobileViTPreTrainedModel,
)
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure, module_spec=__spec__)
| 27 |
'''simple docstring'''
import warnings
from ...utils import logging
from .image_processing_owlvit import OwlViTImageProcessor
__magic_name__ = logging.get_logger(__name__)
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
    def __init__( self : Union[str, Any] ,*args : Optional[Any] ,**kwargs : Optional[int] ):
        '''simple docstring'''
        warnings.warn(
            """The class OwlViTFeatureExtractor is deprecated and will be removed in version 5 of Transformers. Please"""
            """ use OwlViTImageProcessor instead.""" ,FutureWarning ,)
        super().__init__(*args ,**kwargs )
| 27 | 1 |
'''simple docstring'''
import importlib.metadata
import warnings
from copy import deepcopy
from packaging import version
from ..utils import logging
from .import_utils import is_accelerate_available, is_bitsandbytes_available
if is_bitsandbytes_available():
import bitsandbytes as bnb
import torch
import torch.nn as nn
from ..pytorch_utils import ConvaD
if is_accelerate_available():
from accelerate import init_empty_weights
from accelerate.utils import find_tied_parameters
logger = logging.get_logger(__name__)
def lowerCamelCase ( lowerCamelCase : Tuple , lowerCamelCase : Optional[int] , lowerCamelCase : int , lowerCamelCase : Optional[Any]=None , lowerCamelCase : Optional[Any]=None):
# Recurse if needed
if "." in tensor_name:
A_ : Union[str, Any] = tensor_name.split(""".""")
for split in splits[:-1]:
A_ : List[str] = getattr(lowerCamelCase , lowerCamelCase)
if new_module is None:
raise ValueError(F'{module} has no attribute {split}.')
A_ : Any = new_module
A_ : Tuple = splits[-1]
if tensor_name not in module._parameters and tensor_name not in module._buffers:
raise ValueError(F'{module} does not have a parameter or a buffer named {tensor_name}.')
A_ : Union[str, Any] = tensor_name in module._buffers
A_ : str = getattr(lowerCamelCase , lowerCamelCase)
if old_value.device == torch.device("""meta""") and device not in ["meta", torch.device("""meta""")] and value is None:
raise ValueError(F'{tensor_name} is on the meta device, we need a `value` to put in on {device}.')
A_ : int = False
A_ : Any = False
if is_buffer or not is_bitsandbytes_available():
A_ : Tuple = False
A_ : str = False
else:
A_ : Any = hasattr(bnb.nn , """Params4bit""") and isinstance(module._parameters[tensor_name] , bnb.nn.Paramsabit)
A_ : Dict = isinstance(module._parameters[tensor_name] , bnb.nn.IntaParams)
if is_abit or is_abit:
A_ : Optional[Any] = module._parameters[tensor_name]
if param.device.type != "cuda":
if value is None:
A_ : int = old_value.to(lowerCamelCase)
elif isinstance(lowerCamelCase , torch.Tensor):
A_ : Tuple = value.to("""cpu""")
if value.dtype == torch.inta:
A_ : int = version.parse(importlib.metadata.version("""bitsandbytes""")) > version.parse(
"""0.37.2""")
if not is_abit_serializable:
raise ValueError(
"""Detected int8 weights but the version of bitsandbytes is not compatible with int8 serialization. """
"""Make sure to download the latest `bitsandbytes` version. `pip install --upgrade bitsandbytes`.""")
else:
A_ : List[str] = torch.tensor(lowerCamelCase , device="""cpu""")
# Support models using `Conv1D` in place of `nn.Linear` (e.g. gpt2) by transposing the weight matrix prior to quantization.
# Since weights are saved in the correct "orientation", we skip transposing when loading.
if issubclass(module.source_cls , lowerCamelCase) and fpaa_statistics is None:
A_ : Tuple = new_value.T
A_ : str = old_value.__dict__
if is_abit:
A_ : Dict = bnb.nn.IntaParams(lowerCamelCase , requires_grad=lowerCamelCase , **lowerCamelCase).to(lowerCamelCase)
elif is_abit:
A_ : List[str] = bnb.nn.Paramsabit(lowerCamelCase , requires_grad=lowerCamelCase , **lowerCamelCase).to(lowerCamelCase)
A_ : Union[str, Any] = new_value
if fpaa_statistics is not None:
setattr(module.weight , """SCB""" , fpaa_statistics.to(lowerCamelCase))
else:
if value is None:
A_ : Optional[Any] = old_value.to(lowerCamelCase)
elif isinstance(lowerCamelCase , torch.Tensor):
A_ : List[Any] = value.to(lowerCamelCase)
else:
A_ : Any = torch.tensor(lowerCamelCase , device=lowerCamelCase)
if is_buffer:
A_ : Dict = new_value
else:
A_ : List[Any] = nn.Parameter(lowerCamelCase , requires_grad=old_value.requires_grad)
A_ : Any = new_value
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : int=None , lowerCamelCase : Optional[Any]=None , lowerCamelCase : List[Any]=False):
for name, module in model.named_children():
if current_key_name is None:
A_ : Tuple = []
current_key_name.append(lowerCamelCase)
if (isinstance(lowerCamelCase , nn.Linear) or isinstance(lowerCamelCase , lowerCamelCase)) and name not in modules_to_not_convert:
# Check if the current key is not in the `modules_to_not_convert`
if not any(key in """.""".join(lowerCamelCase) for key in modules_to_not_convert):
with init_empty_weights():
if isinstance(lowerCamelCase , lowerCamelCase):
A_ , A_ : List[Any] = module.weight.shape
else:
A_ : Optional[Any] = module.in_features
A_ : Dict = module.out_features
if quantization_config.quantization_method() == "llm_int8":
A_ : List[Any] = bnb.nn.LinearabitLt(
lowerCamelCase , lowerCamelCase , module.bias is not None , has_fpaa_weights=quantization_config.llm_inta_has_fpaa_weight , threshold=quantization_config.llm_inta_threshold , )
A_ : Any = True
else:
if (
quantization_config.llm_inta_skip_modules is not None
and name in quantization_config.llm_inta_skip_modules
):
pass
else:
A_ : Optional[int] = bnb.nn.Linearabit(
lowerCamelCase , lowerCamelCase , module.bias is not None , quantization_config.bnb_abit_compute_dtype , compress_statistics=quantization_config.bnb_abit_use_double_quant , quant_type=quantization_config.bnb_abit_quant_type , )
A_ : Tuple = True
# Store the module class in case we need to transpose the weight later
A_ : int = type(lowerCamelCase)
# Force requires grad to False to avoid unexpected errors
model._modules[name].requires_grad_(lowerCamelCase)
if len(list(module.children())) > 0:
A_ , A_ : Any = _replace_with_bnb_linear(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase , has_been_replaced=lowerCamelCase , )
# Remove the last key for recursion
current_key_name.pop(-1)
return model, has_been_replaced
def lowerCamelCase ( lowerCamelCase : List[str] , lowerCamelCase : Optional[Any]=None , lowerCamelCase : str=None , lowerCamelCase : Union[str, Any]=None):
A_ : str = ["""lm_head"""] if modules_to_not_convert is None else modules_to_not_convert
A_ , A_ : str = _replace_with_bnb_linear(
lowerCamelCase , lowerCamelCase , lowerCamelCase , lowerCamelCase)
if not has_been_replaced:
logger.warning(
"""You are loading your model in 8bit or 4bit but no linear modules were found in your model."""
""" Please double check your model architecture, or submit an issue on github if you think this is"""
""" a bug.""")
return model
def replace_8bit_linear(*args, **kwargs):
    warnings.warn(
        """`replace_8bit_linear` will be deprecated in a future version, please use `replace_with_bnb_linear` instead""" , FutureWarning , )
    return replace_with_bnb_linear(*args , **kwargs)
def set_module_8bit_tensor_to_device(*args, **kwargs):
    warnings.warn(
        """`set_module_8bit_tensor_to_device` will be deprecated in a future version, please use `set_module_quantized_tensor_to_device` instead""" , FutureWarning , )
    return set_module_quantized_tensor_to_device(*args , **kwargs)
def lowerCamelCase ( lowerCamelCase : Tuple):
A_ : str = deepcopy(lowerCamelCase) # this has 0 cost since it is done inside `init_empty_weights` context manager`
tied_model.tie_weights()
A_ : Optional[Any] = find_tied_parameters(lowerCamelCase)
# For compatibility with Accelerate < 0.18
if isinstance(lowerCamelCase , lowerCamelCase):
A_ : str = sum(list(tied_params.values()) , []) + list(tied_params.keys())
else:
A_ : Dict = sum(lowerCamelCase , [])
A_ : List[Any] = len(lowerCamelCase) > 0
# Check if it is a base model
A_ : Union[str, Any] = not hasattr(lowerCamelCase , model.base_model_prefix)
# Ignore this for base models (BertModel, GPT2Model, etc.)
if (not has_tied_params) and is_base_model:
return []
# otherwise they have an attached head
A_ : int = list(model.named_children())
A_ : List[Any] = [list_modules[-1][0]]
# add last module together with tied weights
A_ : Any = set(lowerCamelCase) - set(lowerCamelCase)
A_ : Any = list(set(lowerCamelCase)) + list(lowerCamelCase)
# remove ".weight" from the keys
A_ : Dict = [""".weight""", """.bias"""]
A_ : Union[str, Any] = []
for name in list_untouched:
for name_to_remove in names_to_remove:
if name_to_remove in name:
A_ : Any = name.replace(lowerCamelCase , """""")
filtered_module_names.append(lowerCamelCase)
return filtered_module_names
| 27 |
'''simple docstring'''
from sympy import diff, lambdify, symbols
from sympy.functions import * # noqa: F403
def newton_raphson( function: str , starting_point: complex , variable: str = "x" , precision: float = 10**-10 , multiplicity: int = 1 , ):
    x = symbols(variable)
    func = lambdify(x , function)
    diff_function = lambdify(x , diff(function , x))
    prev_guess = starting_point
    while True:
        if diff_function(prev_guess) != 0:
            next_guess = prev_guess - multiplicity * func(prev_guess) / diff_function(
                prev_guess)
        else:
            raise ZeroDivisionError("""Could not find root""") from None
        # Precision is checked by comparing the difference of consecutive guesses
        if abs(next_guess - prev_guess) < precision:
            return next_guess
        prev_guess = next_guess
# Let's Execute
if __name__ == "__main__":
# Find root of trigonometric function
# Find value of pi
print(f"""The root of sin(x) = 0 is {newton_raphson('sin(x)', 2)}""")
# Find root of polynomial
# Find fourth Root of 5
print(f"""The root of x**4 - 5 = 0 is {newton_raphson('x**4 -5', 0.4 +5j)}""")
# Find value of e
print(
'The root of log(y) - 1 = 0 is ',
f"""{newton_raphson('log(y) - 1', 2, variable='y')}""",
)
# Exponential Roots
print(
'The root of exp(x) - 1 = 0 is',
f"""{newton_raphson('exp(x) - 1', 10, precision=0.0_0_5)}""",
)
# Find root of cos(x)
print(f"""The root of cos(x) = 0 is {newton_raphson('cos(x)', 0)}""")
| 27 | 1 |
'''simple docstring'''
def binary_multiply(a: int, b: int) -> int:
    # Russian-peasant multiplication: add the (doubling) multiplicand whenever
    # the current low bit of the multiplier is set.
    res = 0
    while b > 0:
        if b & 1:
            res += a
        a += a
        b >>= 1
    return res
def binary_mod_multiply(a: int, b: int, modulus: int) -> int:
    # Same double-and-shift scheme, keeping the accumulator reduced mod `modulus`.
    res = 0
    while b > 0:
        if b & 1:
            res = ((res % modulus) + (a % modulus)) % modulus
        a += a
        b >>= 1
    return res
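# Companion sketch (illustrative addition, not in the original file): the same
# double-and-shift idea gives fast modular exponentiation (square-and-multiply).
def binary_mod_power(base: int, exponent: int, modulus: int) -> int:
    res = 1
    base %= modulus
    while exponent > 0:
        if exponent & 1:
            res = (res * base) % modulus
        base = (base * base) % modulus
        exponent >>= 1
    return res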
| 27 |
'''simple docstring'''
import json
import os
from collections import Counter
import torch
import torchvision
import torchvision.transforms as transforms
from PIL import Image
from torch import nn
from torch.utils.data import Dataset
POOLING_BREAKDOWN = {1: (1, 1), 2: (2, 1), 3: (3, 1), 4: (2, 2), 5: (5, 1), 6: (3, 2), 7: (7, 1), 8: (4, 2), 9: (3, 3)}
class __lowerCAmelCase ( nn.Module ):
'''simple docstring'''
    def __init__( self : Dict ,args : Dict ):
        '''simple docstring'''
        super().__init__()
        model = torchvision.models.resnet152(pretrained=True )
        modules = list(model.children() )[:-2]
        self.model = nn.Sequential(*modules )
        self.pool = nn.AdaptiveAvgPool2d(POOLING_BREAKDOWN[args.num_image_embeds] )
def _a ( self : str ,_a : Optional[int] ):
'''simple docstring'''
A_ : Tuple = self.pool(self.model(_a ) )
A_ : Any = torch.flatten(_a ,start_dim=2 )
A_ : str = out.transpose(1 ,2 ).contiguous()
return out # BxNx2048
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
def __init__( self : int ,_a : Optional[Any] ,_a : Optional[Any] ,_a : Dict ,_a : Dict ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Dict = [json.loads(_a ) for l in open(_a )]
A_ : Optional[int] = os.path.dirname(_a )
A_ : Optional[Any] = tokenizer
A_ : Optional[Any] = labels
A_ : List[Any] = len(_a )
A_ : str = max_seq_length
A_ : str = transforms
def __len__( self : str ):
'''simple docstring'''
return len(self.data )
def __getitem__( self : Tuple ,_a : Optional[Any] ):
'''simple docstring'''
A_ : Optional[int] = torch.LongTensor(self.tokenizer.encode(self.data[index]["""text"""] ,add_special_tokens=_a ) )
A_ , A_ , A_ : Dict = sentence[0], sentence[1:-1], sentence[-1]
A_ : Optional[int] = sentence[: self.max_seq_length]
A_ : Any = torch.zeros(self.n_classes )
A_ : Tuple = 1
A_ : Optional[Any] = Image.open(os.path.join(self.data_dir ,self.data[index]["""img"""] ) ).convert("""RGB""" )
A_ : Union[str, Any] = self.transforms(_a )
return {
"image_start_token": start_token,
"image_end_token": end_token,
"sentence": sentence,
"image": image,
"label": label,
}
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = Counter()
for row in self.data:
label_freqs.update(row["""label"""] )
return label_freqs
def lowerCamelCase ( lowerCamelCase : str):
A_ : List[Any] = [len(row["""sentence"""]) for row in batch]
A_ , A_ : Dict = len(lowerCamelCase), max(lowerCamelCase)
A_ : Optional[int] = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
A_ : Tuple = torch.zeros(lowerCamelCase , lowerCamelCase , dtype=torch.long)
for i_batch, (input_row, length) in enumerate(zip(lowerCamelCase , lowerCamelCase)):
A_ : str = input_row["""sentence"""]
A_ : Tuple = 1
A_ : int = torch.stack([row["""image"""] for row in batch])
A_ : str = torch.stack([row["""label"""] for row in batch])
A_ : List[Any] = torch.stack([row["""image_start_token"""] for row in batch])
A_ : Tuple = torch.stack([row["""image_end_token"""] for row in batch])
return text_tensor, mask_tensor, img_tensor, img_start_token, img_end_token, tgt_tensor
def lowerCamelCase ( ):
return [
"Crime",
"Drama",
"Thriller",
"Action",
"Comedy",
"Romance",
"Documentary",
"Short",
"Mystery",
"History",
"Family",
"Adventure",
"Fantasy",
"Sci-Fi",
"Western",
"Horror",
"Sport",
"War",
"Music",
"Musical",
"Animation",
"Biography",
"Film-Noir",
]
def lowerCamelCase ( ):
return transforms.Compose(
[
transforms.Resize(256),
transforms.CenterCrop(224),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.4677_7044, 0.4453_1429, 0.4066_1017] , std=[0.1222_1994, 0.1214_5835, 0.1438_0469] , ),
])
| 27 | 1 |
'''simple docstring'''
import json
from typing import List, Optional, Tuple
from tokenizers import normalizers
from ...tokenization_utils_fast import PreTrainedTokenizerFast
from .tokenization_electra import ElectraTokenizer
VOCAB_FILES_NAMES = {'vocab_file': 'vocab.txt', 'tokenizer_file': 'tokenizer.json'}
PRETRAINED_VOCAB_FILES_MAP = {
'vocab_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/vocab.txt'
),
'google/electra-base-generator': 'https://huggingface.co/google/electra-base-generator/resolve/main/vocab.txt',
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/vocab.txt'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/vocab.txt'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/vocab.txt'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/vocab.txt'
),
},
'tokenizer_file': {
'google/electra-small-generator': (
'https://huggingface.co/google/electra-small-generator/resolve/main/tokenizer.json'
),
'google/electra-base-generator': (
'https://huggingface.co/google/electra-base-generator/resolve/main/tokenizer.json'
),
'google/electra-large-generator': (
'https://huggingface.co/google/electra-large-generator/resolve/main/tokenizer.json'
),
'google/electra-small-discriminator': (
'https://huggingface.co/google/electra-small-discriminator/resolve/main/tokenizer.json'
),
'google/electra-base-discriminator': (
'https://huggingface.co/google/electra-base-discriminator/resolve/main/tokenizer.json'
),
'google/electra-large-discriminator': (
'https://huggingface.co/google/electra-large-discriminator/resolve/main/tokenizer.json'
),
},
}
PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES = {
'google/electra-small-generator': 512,
'google/electra-base-generator': 512,
'google/electra-large-generator': 512,
'google/electra-small-discriminator': 512,
'google/electra-base-discriminator': 512,
'google/electra-large-discriminator': 512,
}
PRETRAINED_INIT_CONFIGURATION = {
'google/electra-small-generator': {'do_lower_case': True},
'google/electra-base-generator': {'do_lower_case': True},
'google/electra-large-generator': {'do_lower_case': True},
'google/electra-small-discriminator': {'do_lower_case': True},
'google/electra-base-discriminator': {'do_lower_case': True},
'google/electra-large-discriminator': {'do_lower_case': True},
}
class __lowerCAmelCase ( __SCREAMING_SNAKE_CASE ):
'''simple docstring'''
a_ = VOCAB_FILES_NAMES
a_ = PRETRAINED_VOCAB_FILES_MAP
a_ = PRETRAINED_INIT_CONFIGURATION
a_ = PRETRAINED_POSITIONAL_EMBEDDINGS_SIZES
a_ = ElectraTokenizer
def __init__( self : Any ,_a : str=None ,_a : Union[str, Any]=None ,_a : Optional[int]=True ,_a : Union[str, Any]="[UNK]" ,_a : List[Any]="[SEP]" ,_a : Union[str, Any]="[PAD]" ,_a : str="[CLS]" ,_a : Union[str, Any]="[MASK]" ,_a : Optional[int]=True ,_a : Tuple=None ,**_a : List[str] ,):
'''simple docstring'''
super().__init__(
_a ,tokenizer_file=_a ,do_lower_case=_a ,unk_token=_a ,sep_token=_a ,pad_token=_a ,cls_token=_a ,mask_token=_a ,tokenize_chinese_chars=_a ,strip_accents=_a ,**_a ,)
A_ : Optional[int] = json.loads(self.backend_tokenizer.normalizer.__getstate__() )
if (
normalizer_state.get("""lowercase""" ,_a ) != do_lower_case
or normalizer_state.get("""strip_accents""" ,_a ) != strip_accents
or normalizer_state.get("""handle_chinese_chars""" ,_a ) != tokenize_chinese_chars
):
A_ : List[str] = getattr(_a ,normalizer_state.pop("""type""" ) )
A_ : str = do_lower_case
A_ : List[Any] = strip_accents
A_ : Optional[Any] = tokenize_chinese_chars
A_ : List[str] = normalizer_class(**_a )
A_ : Dict = do_lower_case
    def build_inputs_with_special_tokens( self ,token_ids_0 ,token_ids_1=None ):
        '''simple docstring'''
        output = [self.cls_token_id] + token_ids_0 + [self.sep_token_id]
        if token_ids_1:
            output += token_ids_1 + [self.sep_token_id]
        return output
    def create_token_type_ids_from_sequences( self ,token_ids_0 : List[int] ,token_ids_1 : Optional[List[int]] = None ):
        '''simple docstring'''
        sep = [self.sep_token_id]
        cls = [self.cls_token_id]
        if token_ids_1 is None:
            return len(cls + token_ids_0 + sep ) * [0]
        return len(cls + token_ids_0 + sep ) * [0] + len(token_ids_1 + sep ) * [1]
    def save_vocabulary( self ,save_directory : str ,filename_prefix : Optional[str] = None ):
        '''simple docstring'''
        files = self._tokenizer.model.save(save_directory ,name=filename_prefix )
        return tuple(files )
| 27 |
'''simple docstring'''
from __future__ import annotations
import math
def prime_sieve(num: int) -> list[int]:
    if num <= 0:
        msg = f'{num}: Invalid input, please enter a positive integer.'
        raise ValueError(msg)
    sieve = [True] * (num + 1)
    prime = []
    start = 2
    end = int(math.sqrt(num))
    while start <= end:
        # If start is a prime
        if sieve[start] is True:
            prime.append(start)
            # Set multiples of start be False
            for i in range(start * start , num + 1 , start):
                if sieve[i] is True:
                    sieve[i] = False
        start += 1
    for j in range(end + 1 , num + 1):
        if sieve[j] is True:
            prime.append(j)
    return prime
if __name__ == "__main__":
print(prime_sieve(int(input('Enter a positive integer: ').strip())))
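    # Sanity check (illustrative): prime_sieve(30) == [2, 3, 5, 7, 11, 13, 17, 19, 23, 29];
    # the sieve runs in O(n log log n) time with O(n) memory.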
| 27 | 1 |
'''simple docstring'''
import numpy as np
__magic_name__ = [
['a', 'b', 'c', 'd', 'e'],
['f', 'g', 'h', 'i', 'k'],
['l', 'm', 'n', 'o', 'p'],
['q', 'r', 's', 't', 'u'],
['v', 'w', 'x', 'y', 'z'],
]
class BifidCipher:
    '''simple docstring'''
    def __init__(self) -> None:
        '''simple docstring'''
        self.SQUARE = np.array(SQUARE)
    def letter_to_numbers(self, letter: str) -> np.ndarray:
        '''simple docstring'''
        index1, index2 = np.where(letter == self.SQUARE)
        indexes = np.concatenate([index1 + 1, index2 + 1])
        return indexes
    def numbers_to_letter(self, index1: int, index2: int) -> str:
        '''simple docstring'''
        letter = self.SQUARE[index1 - 1, index2 - 1]
        return letter
    def encode(self, message: str) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(" ", "")
        message = message.replace("j", "i")
        first_step = np.empty((2, len(message)))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[0, letter_index] = numbers[0]
            first_step[1, letter_index] = numbers[1]
        second_step = first_step.reshape(2 * len(message))
        encoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[numbers_index * 2])
            index2 = int(second_step[(numbers_index * 2) + 1])
            letter = self.numbers_to_letter(index1, index2)
            encoded_message = encoded_message + letter
        return encoded_message
    def decode(self, message: str) -> str:
        '''simple docstring'''
        message = message.lower()
        message = message.replace(" ", "")
        first_step = np.empty(2 * len(message))
        for letter_index in range(len(message)):
            numbers = self.letter_to_numbers(message[letter_index])
            first_step[letter_index * 2] = numbers[0]
            first_step[letter_index * 2 + 1] = numbers[1]
        second_step = first_step.reshape((2, len(message)))
        decoded_message = ""
        for numbers_index in range(len(message)):
            index1 = int(second_step[0, numbers_index])
            index2 = int(second_step[1, numbers_index])
            letter = self.numbers_to_letter(index1, index2)
            decoded_message = decoded_message + letter
        return decoded_message
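if __name__ == "__main__":
    # Minimal round-trip sketch for the class above (assumes a space-free message without "j"):
    cipher = BifidCipher()
    encoded = cipher.encode("testmessage")
    assert cipher.decode(encoded) == "testmessage"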
| 27 |
'''simple docstring'''
import argparse
import logging
import os
import time
import timeit
import datasets
import numpy as np
import pycuda.autoinit # noqa: F401
import pycuda.driver as cuda
import tensorrt as trt
import torch
from absl import logging as absl_logging
from accelerate import Accelerator
from datasets import load_dataset, load_metric
from torch.utils.data import DataLoader
from utils_qa import postprocess_qa_predictions
import transformers
from transformers import AutoTokenizer, EvalPrediction, default_data_collator, set_seed
from transformers.trainer_pt_utils import nested_concat, nested_truncate
TRT_LOGGER = trt.Logger(trt.Logger.WARNING)
absl_logger = absl_logging.get_absl_logger()
absl_logger.setLevel(logging.WARNING)
logger = logging.getLogger(__name__)
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
'--onnx_model_path',
default=None,
type=str,
required=True,
help='Path to ONNX model: ',
)
parser.add_argument(
'--output_dir',
default=None,
type=str,
required=True,
help='The output directory where the model checkpoints and predictions will be written.',
)
# Other parameters
parser.add_argument(
'--tokenizer_name',
default='',
type=str,
required=True,
help='Pretrained tokenizer name or path if not the same as model_name',
)
parser.add_argument(
'--version_2_with_negative',
action='store_true',
help='If true, the SQuAD examples contain some that do not have an answer.',
)
parser.add_argument(
'--null_score_diff_threshold',
type=float,
default=0.0,
help='If null_score - best_non_null is greater than the threshold predict null.',
)
parser.add_argument(
'--max_seq_length',
default=384,
type=int,
help=(
'The maximum total input sequence length after WordPiece tokenization. Sequences '
'longer than this will be truncated, and sequences shorter than this will be padded.'
),
)
parser.add_argument(
'--doc_stride',
default=128,
type=int,
help='When splitting up a long document into chunks, how much stride to take between chunks.',
)
parser.add_argument('--per_device_eval_batch_size', default=8, type=int, help='Batch size per GPU/CPU for evaluation.')
parser.add_argument(
'--n_best_size',
default=20,
type=int,
help='The total number of n-best predictions to generate in the nbest_predictions.json output file.',
)
parser.add_argument(
'--max_answer_length',
default=30,
type=int,
help=(
'The maximum length of an answer that can be generated. This is needed because the start '
'and end predictions are not conditioned on one another.'
),
)
parser.add_argument('--seed', type=int, default=42, help='random seed for initialization')
parser.add_argument(
'--dataset_name',
type=str,
default=None,
required=True,
help='The name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--dataset_config_name',
type=str,
default=None,
help='The configuration name of the dataset to use (via the datasets library).',
)
parser.add_argument(
'--preprocessing_num_workers', type=int, default=4, help='A csv or a json file containing the training data.'
)
parser.add_argument('--overwrite_cache', action='store_true', help='Overwrite the cached training and evaluation sets')
parser.add_argument(
'--fp16',
action='store_true',
help='Whether to use 16-bit (mixed) precision instead of 32-bit',
)
parser.add_argument(
'--int8',
action='store_true',
help='Whether to use INT8',
)
args = parser.parse_args()
if args.tokenizer_name:
    tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, use_fast=True)
else:
raise ValueError(
'You are instantiating a new tokenizer from scratch. This is not supported by this script.'
'You can do it from another script, save it, and load it from here, using --tokenizer_name.'
)
logger.info('Training/evaluation parameters %s', args)
args.eval_batch_size = args.per_device_eval_batch_size
INPUT_SHAPE = (args.eval_batch_size, args.max_seq_length)
# TRT Engine properties
STRICT_TYPES = True
engine_name = 'temp_engine/bert-fp32.engine'
if args.fp16:
    engine_name = 'temp_engine/bert-fp16.engine'
if args.int8:
    engine_name = 'temp_engine/bert-int8.engine'
# import ONNX file
if not os.path.exists('temp_engine'):
os.makedirs('temp_engine')
EXPLICIT_BATCH = 1 << (int)(trt.NetworkDefinitionCreationFlag.EXPLICIT_BATCH)
with trt.Builder(TRT_LOGGER) as builder, builder.create_network(EXPLICIT_BATCH) as network, trt.OnnxParser(
network, TRT_LOGGER
) as parser:
with open(args.onnx_model_path, 'rb') as model:
if not parser.parse(model.read()):
for error in range(parser.num_errors):
print(parser.get_error(error))
# Query input names and shapes from parsed TensorRT network
    network_inputs = [network.get_input(i) for i in range(network.num_inputs)]
    input_names = [_input.name for _input in network_inputs]  # ex: ["actual_input1"]
with builder.create_builder_config() as config:
        config.max_workspace_size = 1 << 50
        if STRICT_TYPES:
            config.set_flag(trt.BuilderFlag.STRICT_TYPES)
        if args.fp16:
            config.set_flag(trt.BuilderFlag.FP16)
        if args.int8:
            config.set_flag(trt.BuilderFlag.INT8)
        profile = builder.create_optimization_profile()
        config.add_optimization_profile(profile)
        for i in range(len(input_names)):
            profile.set_shape(input_names[i], INPUT_SHAPE, INPUT_SHAPE, INPUT_SHAPE)
        engine = builder.build_engine(network, config)
# serialize_engine and store in file (can be directly loaded and deserialized):
with open(engine_name, 'wb') as f:
f.write(engine.serialize())
def model_infer(inputs, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream):
    input_ids = np.asarray(inputs["input_ids"], dtype=np.int32)
    attention_mask = np.asarray(inputs["attention_mask"], dtype=np.int32)
    token_type_ids = np.asarray(inputs["token_type_ids"], dtype=np.int32)
    # Copy inputs
    cuda.memcpy_htod_async(d_inputs[0], input_ids.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[1], attention_mask.ravel(), stream)
    cuda.memcpy_htod_async(d_inputs[2], token_type_ids.ravel(), stream)
    # start time
    start_time = time.time()
    # Run inference
    context.execute_async(
        bindings=[int(d_inp) for d_inp in d_inputs] + [int(d_output0), int(d_output1)], stream_handle=stream.handle
    )
    # Transfer predictions back from GPU
    cuda.memcpy_dtoh_async(h_output0, d_output0, stream)
    cuda.memcpy_dtoh_async(h_output1, d_output1, stream)
    # Synchronize the stream and take time
    stream.synchronize()
    # end time
    end_time = time.time()
    infer_time = end_time - start_time
    outputs = (h_output0, h_output1)
    # print(outputs)
    return outputs, infer_time
# Initialize the accelerator. We will let the accelerator handle device placement for us in this example.
__magic_name__ = Accelerator()
# Make one log on every process with the configuration for debugging.
logging.basicConfig(
format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO,
)
# Setup logging, we only want one process per machine to log things on the screen.
# accelerator.is_local_main_process is only True for one process per machine.
logger.setLevel(logging.INFO if accelerator.is_local_main_process else logging.ERROR)
if accelerator.is_local_main_process:
datasets.utils.logging.set_verbosity_warning()
transformers.utils.logging.set_verbosity_info()
else:
datasets.utils.logging.set_verbosity_error()
transformers.utils.logging.set_verbosity_error()
# If passed along, set the training seed now.
if args.seed is not None:
set_seed(args.seed)
# Get the datasets: you can either provide your own CSV/JSON/TXT training and evaluation files (see below)
# or just provide the name of one of the public datasets available on the hub at https://huggingface.co/datasets/
# (the dataset will be downloaded automatically from the datasets Hub).
#
# For CSV/JSON files, this script will use the column called 'text' or the first column if no column called
# 'text' is found. You can easily tweak this behavior (see below).
if args.dataset_name is not None:
# Downloading and loading a dataset from the hub.
    raw_datasets = load_dataset(args.dataset_name, args.dataset_config_name)
else:
raise ValueError('Evaluation requires a dataset name')
# See more about loading any type of standard or custom dataset (from files, python dict, pandas DataFrame, etc) at
# https://huggingface.co/docs/datasets/loading_datasets.html.
# Preprocessing the datasets.
# Preprocessing is slighlty different for training and evaluation.
column_names = raw_datasets['validation'].column_names
question_column_name = 'question' if 'question' in column_names else column_names[0]
context_column_name = 'context' if 'context' in column_names else column_names[1]
answer_column_name = 'answers' if 'answers' in column_names else column_names[2]
# Padding side determines if we do (question|context) or (context|question).
pad_on_right = tokenizer.padding_side == 'right'
if args.max_seq_length > tokenizer.model_max_length:
logger.warning(
f"""The max_seq_length passed ({args.max_seq_length}) is larger than the maximum length for the"""
f"""model ({tokenizer.model_max_length}). Using max_seq_length={tokenizer.model_max_length}."""
)
max_seq_length = min(args.max_seq_length, tokenizer.model_max_length)
def prepare_validation_features(examples):
    # Some of the questions have lots of whitespace on the left, which is not useful and will make the
    # truncation of the context fail (the tokenized question will take a lots of space). So we remove that
    # left whitespace
    examples[question_column_name] = [q.lstrip() for q in examples[question_column_name]]
    # Tokenize our examples with truncation and maybe padding, but keep the overflows using a stride. This results
    # in one example possible giving several features when a context is long, each of those features having a
    # context that overlaps a bit the context of the previous feature.
    tokenized_examples = tokenizer(
        examples[question_column_name if pad_on_right else context_column_name],
        examples[context_column_name if pad_on_right else question_column_name],
        truncation="only_second" if pad_on_right else "only_first",
        max_length=max_seq_length,
        stride=args.doc_stride,
        return_overflowing_tokens=True,
        return_offsets_mapping=True,
        padding="max_length",
    )
    # Since one example might give us several features if it has a long context, we need a map from a feature to
    # its corresponding example. This key gives us just that.
    sample_mapping = tokenized_examples.pop("overflow_to_sample_mapping")
    # For evaluation, we will need to convert our predictions to substrings of the context, so we keep the
    # corresponding example_id and we will store the offset mappings.
    tokenized_examples["example_id"] = []
    for i in range(len(tokenized_examples["input_ids"])):
        # Grab the sequence corresponding to that example (to know what is the context and what is the question).
        sequence_ids = tokenized_examples.sequence_ids(i)
        context_index = 1 if pad_on_right else 0
        # One example can give several spans, this is the index of the example containing this span of text.
        sample_index = sample_mapping[i]
        tokenized_examples["example_id"].append(examples["id"][sample_index])
        # Set to None the offset_mapping that are not part of the context so it's easy to determine if a token
        # position is part of the context or not.
        tokenized_examples["offset_mapping"][i] = [
            (o if sequence_ids[k] == context_index else None)
            for k, o in enumerate(tokenized_examples["offset_mapping"][i])
        ]
    return tokenized_examples
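# Illustrative note on the stride logic above (hypothetical values): with max_seq_length=384 and
# stride=128, one long example can yield several overlapping features; e.g.
# overflow_to_sample_mapping == [0, 0, 1] means features 0 and 1 both come from example 0
# and feature 2 comes from example 1.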
eval_examples = raw_datasets['validation']
# Validation Feature Creation
eval_dataset = eval_examples.map(
    prepare_validation_features,
    batched=True,
    num_proc=args.preprocessing_num_workers,
    remove_columns=column_names,
    load_from_cache_file=not args.overwrite_cache,
    desc='Running tokenizer on validation dataset',
)
data_collator = default_data_collator
eval_dataset_for_model = eval_dataset.remove_columns(['example_id', 'offset_mapping'])
eval_dataloader = DataLoader(
    eval_dataset_for_model, collate_fn=data_collator, batch_size=args.per_device_eval_batch_size
)
def post_processing_function(examples, features, predictions, stage="eval"):
    # Post-processing: we match the start logits and end logits to answers in the original context.
    predictions = postprocess_qa_predictions(
        examples=examples,
        features=features,
        predictions=predictions,
        version_2_with_negative=args.version_2_with_negative,
        n_best_size=args.n_best_size,
        max_answer_length=args.max_answer_length,
        null_score_diff_threshold=args.null_score_diff_threshold,
        output_dir=args.output_dir,
        prefix=stage,
    )
    # Format the result to the format the metric expects.
    if args.version_2_with_negative:
        formatted_predictions = [
            {"id": k, "prediction_text": v, "no_answer_probability": 0.0} for k, v in predictions.items()
        ]
    else:
        formatted_predictions = [{"id": k, "prediction_text": v} for k, v in predictions.items()]
    references = [{"id": ex["id"], "answers": ex[answer_column_name]} for ex in examples]
    return EvalPrediction(predictions=formatted_predictions, label_ids=references)
metric = load_metric('squad_v2' if args.version_2_with_negative else 'squad')
# Evaluation!
logger.info('Loading ONNX model %s for evaluation', args.onnx_model_path)
with open(engine_name, 'rb') as f, trt.Runtime(TRT_LOGGER) as runtime, runtime.deserialize_cuda_engine(
f.read()
) as engine, engine.create_execution_context() as context:
# setup for TRT inferrence
for i in range(len(input_names)):
context.set_binding_shape(i, INPUT_SHAPE)
assert context.all_binding_shapes_specified
    def binding_nbytes(binding):
        return trt.volume(engine.get_binding_shape(binding)) * engine.get_binding_dtype(binding).itemsize
# Allocate device memory for inputs and outputs.
    d_inputs = [cuda.mem_alloc(binding_nbytes(binding)) for binding in engine if engine.binding_is_input(binding)]
    # Allocate output buffer
    h_output0 = cuda.pagelocked_empty(tuple(context.get_binding_shape(3)), dtype=np.float32)
    h_output1 = cuda.pagelocked_empty(tuple(context.get_binding_shape(4)), dtype=np.float32)
    d_output0 = cuda.mem_alloc(h_output0.nbytes)
    d_output1 = cuda.mem_alloc(h_output1.nbytes)
    # Create a stream in which to copy inputs/outputs and run inference.
    stream = cuda.Stream()
# Evaluation
logger.info('***** Running Evaluation *****')
logger.info(f""" Num examples = {len(eval_dataset)}""")
logger.info(f""" Batch size = {args.per_device_eval_batch_size}""")
    total_time = 0.0
    niter = 0
    start_time = timeit.default_timer()
    all_preds = None
    for step, batch in enumerate(eval_dataloader):
        outputs, infer_time = model_infer(batch, context, d_inputs, h_output0, h_output1, d_output0, d_output1, stream)
        total_time += infer_time
        niter += 1
        start_logits, end_logits = outputs
        start_logits = torch.tensor(start_logits)
        end_logits = torch.tensor(end_logits)
        # necessary to pad predictions and labels for being gathered
        start_logits = accelerator.pad_across_processes(start_logits, dim=1, pad_index=-100)
        end_logits = accelerator.pad_across_processes(end_logits, dim=1, pad_index=-100)
        logits = (accelerator.gather(start_logits).cpu().numpy(), accelerator.gather(end_logits).cpu().numpy())
        all_preds = logits if all_preds is None else nested_concat(all_preds, logits, padding_index=-100)
    if all_preds is not None:
        all_preds = nested_truncate(all_preds, len(eval_dataset))
    evalTime = timeit.default_timer() - start_time
    logger.info(' Evaluation done in total %f secs (%f sec per example)', evalTime, evalTime / len(eval_dataset))
    # Inference time from TRT
    logger.info('Average Inference Time = {:.3f} ms'.format(total_time * 1_000 / niter))
    logger.info('Total Inference Time = {:.3f} ms'.format(total_time * 1_000))
    logger.info('Total Number of Inference = %d', niter)
    prediction = post_processing_function(eval_examples, eval_dataset, all_preds)
    eval_metric = metric.compute(predictions=prediction.predictions, references=prediction.label_ids)
    logger.info(f'Evaluation metrics: {eval_metric}')
| 27 | 1 |
'''simple docstring'''
from __future__ import annotations
import math
def is_prime(number: int) -> bool:
    if 1 < number < 4:
        # 2 and 3 are primes
        return True
    elif number < 2 or number % 2 == 0 or number % 3 == 0:
        # Negatives, 0, 1, all even numbers, all multiples of 3 are not primes
        return False
    # All primes number are in format of 6k +/- 1
    for i in range(5, int(math.sqrt(number) + 1), 6):
        if number % i == 0 or number % (i + 2) == 0:
            return False
    return True
def list_truncated_nums(n: int) -> list[int]:
    str_num = str(n)
    list_nums = [n]
    for i in range(1, len(str_num)):
        list_nums.append(int(str_num[i:]))
        list_nums.append(int(str_num[:-i]))
    return list_nums
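# Worked example for the helper above: list_truncated_nums(3797) returns
# [3797, 797, 379, 97, 37, 7, 3], i.e. the number itself plus every left and right truncation.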
def validate(n: int) -> bool:
    if len(str(n)) > 3:
        if not is_prime(int(str(n)[-3:])) or not is_prime(int(str(n)[:3])):
            return False
    return True
def compute_truncated_primes(count: int = 11) -> list[int]:
    list_truncated_primes: list[int] = []
    num = 13
    while len(list_truncated_primes) != count:
        if validate(num):
            list_nums = list_truncated_nums(num)
            if all(is_prime(i) for i in list_nums):
                list_truncated_primes.append(num)
        num += 2
    return list_truncated_primes
def solution() -> int:
    return sum(compute_truncated_primes(11))
if __name__ == "__main__":
print(f"""{sum(compute_truncated_primes(11)) = }""")
| 27 |
'''simple docstring'''
from typing import TYPE_CHECKING
from ...utils import (
OptionalDependencyNotAvailable,
_LazyModule,
is_tf_available,
is_torch_available,
is_vision_available,
)
_import_structure = {
    'configuration_convnext': ['CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP', 'ConvNextConfig', 'ConvNextOnnxConfig']
}
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['feature_extraction_convnext'] = ['ConvNextFeatureExtractor']
    _import_structure['image_processing_convnext'] = ['ConvNextImageProcessor']
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_convnext'] = [
        'CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST',
        'ConvNextForImageClassification',
        'ConvNextModel',
        'ConvNextPreTrainedModel',
        'ConvNextBackbone',
    ]
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
    _import_structure['modeling_tf_convnext'] = [
        'TFConvNextForImageClassification',
        'TFConvNextModel',
        'TFConvNextPreTrainedModel',
    ]
if TYPE_CHECKING:
from .configuration_convnext import CONVNEXT_PRETRAINED_CONFIG_ARCHIVE_MAP, ConvNextConfig, ConvNextOnnxConfig
try:
if not is_vision_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .feature_extraction_convnext import ConvNextFeatureExtractor
from .image_processing_convnext import ConvNextImageProcessor
try:
if not is_torch_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_convnext import (
CONVNEXT_PRETRAINED_MODEL_ARCHIVE_LIST,
ConvNextBackbone,
ConvNextForImageClassification,
ConvNextModel,
ConvNextPreTrainedModel,
)
try:
if not is_tf_available():
raise OptionalDependencyNotAvailable()
except OptionalDependencyNotAvailable:
pass
else:
from .modeling_tf_convnext import TFConvNextForImageClassification, TFConvNextModel, TFConvNextPreTrainedModel
else:
import sys
    sys.modules[__name__] = _LazyModule(__name__, globals()['__file__'], _import_structure)
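# Illustrative effect of the lazy module above (a sketch, assuming transformers is installed):
# importing the package stays cheap, and heavy backends load on first attribute access, e.g.
#     from transformers.models import convnext
#     convnext.ConvNextConfig  # resolved from configuration_convnext on demand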
| 27 | 1 |
'''simple docstring'''
def apply_table(inp, table):
    res = ""
    for i in table:
        res += inp[i - 1]
    return res
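# Worked example for apply_table above: apply_table("0101", [2, 4, 3, 1]) == "1100",
# since the 1-based positions 2, 4, 3, 1 of "0101" hold "1", "1", "0", "0".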
def left_shift(data):
    return data[1:] + data[0]
def xor(a, b):
    res = ""
    for i in range(len(a)):
        if a[i] == b[i]:
            res += "0"
        else:
            res += "1"
    return res
def apply_sbox(s, data):
    row = int("0b" + data[0] + data[-1], 2)
    col = int("0b" + data[1:3], 2)
    return bin(s[row][col])[2:]
def function(expansion, s0, s1, key, message):
    left = message[:4]
    right = message[4:]
    temp = apply_table(right, expansion)
    temp = xor(temp, key)
    l = apply_sbox(s0, temp[:4])  # noqa: E741
    r = apply_sbox(s1, temp[4:])
    l = "0" * (2 - len(l)) + l  # noqa: E741
    r = "0" * (2 - len(r)) + r
    temp = apply_table(l + r, p4_table)
    temp = xor(left, temp)
    return temp + right
if __name__ == "__main__":
__magic_name__ = input('Enter 10 bit key: ')
__magic_name__ = input('Enter 8 bit message: ')
__magic_name__ = [6, 3, 7, 4, 8, 5, 10, 9]
__magic_name__ = [3, 5, 2, 7, 4, 10, 1, 9, 8, 6]
__magic_name__ = [2, 4, 3, 1]
__magic_name__ = [2, 6, 3, 1, 4, 8, 5, 7]
__magic_name__ = [4, 1, 3, 5, 7, 2, 8, 6]
__magic_name__ = [4, 1, 2, 3, 2, 3, 4, 1]
__magic_name__ = [[1, 0, 3, 2], [3, 2, 1, 0], [0, 2, 1, 3], [3, 1, 3, 2]]
__magic_name__ = [[0, 1, 2, 3], [2, 0, 1, 3], [3, 0, 1, 0], [2, 1, 0, 3]]
# key generation
__magic_name__ = apply_table(key, paa_table)
__magic_name__ = temp[:5]
__magic_name__ = temp[5:]
__magic_name__ = left_shift(left)
__magic_name__ = left_shift(right)
__magic_name__ = apply_table(left + right, pa_table)
__magic_name__ = left_shift(left)
__magic_name__ = left_shift(right)
__magic_name__ = left_shift(left)
__magic_name__ = left_shift(right)
__magic_name__ = apply_table(left + right, pa_table)
# encryption
__magic_name__ = apply_table(message, IP)
__magic_name__ = function(expansion, sa, sa, keya, temp)
__magic_name__ = temp[4:] + temp[:4]
__magic_name__ = function(expansion, sa, sa, keya, temp)
__magic_name__ = apply_table(temp, IP_inv)
print('Cipher text is:', CT)
# decryption
__magic_name__ = apply_table(CT, IP)
__magic_name__ = function(expansion, sa, sa, keya, temp)
__magic_name__ = temp[4:] + temp[:4]
__magic_name__ = function(expansion, sa, sa, keya, temp)
__magic_name__ = apply_table(temp, IP_inv)
print('Plain text after decypting is:', PT)
| 27 |
'''simple docstring'''
import copy
import os
from collections import OrderedDict
from typing import TYPE_CHECKING, Any, Dict, Mapping, Optional, Union
if TYPE_CHECKING:
from ...processing_utils import ProcessorMixin
from ...utils import TensorType
from ...configuration_utils import PretrainedConfig
from ...onnx import OnnxConfig
from ...utils import logging
logger = logging.get_logger(__name__)
OWLVIT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
'google/owlvit-base-patch32': 'https://huggingface.co/google/owlvit-base-patch32/resolve/main/config.json',
'google/owlvit-base-patch16': 'https://huggingface.co/google/owlvit-base-patch16/resolve/main/config.json',
'google/owlvit-large-patch14': 'https://huggingface.co/google/owlvit-large-patch14/resolve/main/config.json',
}
class OwlViTTextConfig(PretrainedConfig):
'''simple docstring'''
a_ = """owlvit_text_model"""
def __init__( self : Union[str, Any] ,_a : Any=49408 ,_a : Any=512 ,_a : Tuple=2048 ,_a : Dict=12 ,_a : Optional[int]=8 ,_a : Tuple=16 ,_a : Tuple="quick_gelu" ,_a : Optional[Any]=1e-5 ,_a : List[Any]=0.0 ,_a : Optional[int]=0.02 ,_a : Dict=1.0 ,_a : Dict=0 ,_a : Any=49406 ,_a : Tuple=49407 ,**_a : List[Any] ,):
'''simple docstring'''
super().__init__(pad_token_id=_a ,bos_token_id=_a ,eos_token_id=_a ,**_a )
A_ : Tuple = vocab_size
A_ : int = hidden_size
A_ : Optional[int] = intermediate_size
A_ : Optional[int] = num_hidden_layers
A_ : Union[str, Any] = num_attention_heads
A_ : int = max_position_embeddings
A_ : str = hidden_act
A_ : Union[str, Any] = layer_norm_eps
A_ : Tuple = attention_dropout
A_ : Union[str, Any] = initializer_range
A_ : List[Any] = initializer_factor
@classmethod
def _a ( cls : List[str] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : int = cls.get_config_dict(_a ,**_a )
# get the text config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : Union[str, Any] = config_dict["""text_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class OwlViTVisionConfig(PretrainedConfig):
'''simple docstring'''
a_ = """owlvit_vision_model"""
def __init__( self : List[Any] ,_a : Optional[Any]=768 ,_a : Tuple=3072 ,_a : Dict=12 ,_a : int=12 ,_a : Dict=3 ,_a : Tuple=768 ,_a : int=32 ,_a : int="quick_gelu" ,_a : List[Any]=1e-5 ,_a : Tuple=0.0 ,_a : List[Any]=0.02 ,_a : str=1.0 ,**_a : int ,):
'''simple docstring'''
super().__init__(**_a )
A_ : List[str] = hidden_size
A_ : Union[str, Any] = intermediate_size
A_ : Union[str, Any] = num_hidden_layers
A_ : Optional[Any] = num_attention_heads
A_ : int = num_channels
A_ : str = image_size
A_ : List[Any] = patch_size
A_ : int = hidden_act
A_ : List[Any] = layer_norm_eps
A_ : List[str] = attention_dropout
A_ : str = initializer_range
A_ : str = initializer_factor
@classmethod
def _a ( cls : List[Any] ,_a : Union[str, os.PathLike] ,**_a : str ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : Optional[int] = cls.get_config_dict(_a ,**_a )
# get the vision config dict if we are loading from OwlViTConfig
if config_dict.get("""model_type""" ) == "owlvit":
A_ : List[str] = config_dict["""vision_config"""]
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
class OwlViTConfig(PretrainedConfig):
'''simple docstring'''
a_ = """owlvit"""
a_ = True
def __init__( self : Union[str, Any] ,_a : List[str]=None ,_a : List[str]=None ,_a : Dict=512 ,_a : List[Any]=2.6592 ,_a : Optional[Any]=True ,**_a : Optional[int] ,):
'''simple docstring'''
super().__init__(**_a )
if text_config is None:
A_ : List[Any] = {}
logger.info("""text_config is None. Initializing the OwlViTTextConfig with default values.""" )
if vision_config is None:
A_ : Tuple = {}
logger.info("""vision_config is None. initializing the OwlViTVisionConfig with default values.""" )
A_ : Dict = OwlViTTextConfig(**_a )
A_ : Dict = OwlViTVisionConfig(**_a )
A_ : Any = projection_dim
A_ : Optional[int] = logit_scale_init_value
A_ : Optional[int] = return_dict
A_ : Dict = 1.0
@classmethod
def _a ( cls : Union[str, Any] ,_a : Union[str, os.PathLike] ,**_a : Optional[int] ):
'''simple docstring'''
cls._set_token_in_kwargs(_a )
A_ , A_ : List[Any] = cls.get_config_dict(_a ,**_a )
if "model_type" in config_dict and hasattr(cls ,"""model_type""" ) and config_dict["model_type"] != cls.model_type:
logger.warning(
f'You are using a model of type {config_dict["model_type"]} to instantiate a model of type '
f'{cls.model_type}. This is not supported for all configurations of models and can yield errors.' )
return cls.from_dict(_a ,**_a )
@classmethod
def _a ( cls : int ,_a : Dict ,_a : Dict ,**_a : List[str] ):
'''simple docstring'''
A_ : str = {}
A_ : int = text_config
A_ : Union[str, Any] = vision_config
return cls.from_dict(_a ,**_a )
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : Dict = copy.deepcopy(self.__dict__ )
A_ : str = self.text_config.to_dict()
A_ : Optional[int] = self.vision_config.to_dict()
A_ : List[Any] = self.__class__.model_type
return output
class OwlViTOnnxConfig(OnnxConfig):
'''simple docstring'''
@property
def _a ( self : int ):
'''simple docstring'''
return OrderedDict(
[
("""input_ids""", {0: """batch""", 1: """sequence"""}),
("""pixel_values""", {0: """batch""", 1: """num_channels""", 2: """height""", 3: """width"""}),
("""attention_mask""", {0: """batch""", 1: """sequence"""}),
] )
@property
def _a ( self : str ):
'''simple docstring'''
return OrderedDict(
[
("""logits_per_image""", {0: """batch"""}),
("""logits_per_text""", {0: """batch"""}),
("""text_embeds""", {0: """batch"""}),
("""image_embeds""", {0: """batch"""}),
] )
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 1e-4
def _a ( self : int ,_a : "ProcessorMixin" ,_a : int = -1 ,_a : int = -1 ,_a : Optional["TensorType"] = None ,):
'''simple docstring'''
A_ : Any = super().generate_dummy_inputs(
processor.tokenizer ,batch_size=_a ,seq_length=_a ,framework=_a )
A_ : Any = super().generate_dummy_inputs(
processor.image_processor ,batch_size=_a ,framework=_a )
return {**text_input_dict, **image_input_dict}
@property
def _a ( self : Optional[Any] ):
'''simple docstring'''
return 14
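# Illustrative usage sketch for the configs above (upstream, the composing classmethod is
# named from_text_vision_configs and takes plain dicts):
#     text_cfg = OwlViTTextConfig(vocab_size=49408)
#     vision_cfg = OwlViTVisionConfig(patch_size=32)
#     cfg = OwlViTConfig.from_text_vision_configs(text_cfg.to_dict(), vision_cfg.to_dict())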
| 27 | 1 |
'''simple docstring'''
from ..utils import DummyObject, requires_backends
class DPMSolverSDEScheduler(metaclass=DummyObject):
'''simple docstring'''
a_ = ["""torch""", """torchsde"""]
def __init__( self : Any ,*_a : Union[str, Any] ,**_a : Optional[int] ):
'''simple docstring'''
requires_backends(self ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : Optional[int] ,*_a : List[Any] ,**_a : Any ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
@classmethod
def _a ( cls : List[Any] ,*_a : Tuple ,**_a : Union[str, Any] ):
'''simple docstring'''
requires_backends(cls ,["""torch""", """torchsde"""] )
| 27 |
'''simple docstring'''
import copy
from typing import Any, Dict, List, Optional, Union
import numpy as np
import torch
from ...audio_utils import mel_filter_bank, spectrogram, window_function
from ...feature_extraction_sequence_utils import SequenceFeatureExtractor
from ...feature_extraction_utils import BatchFeature
from ...utils import TensorType, logging
logger = logging.get_logger(__name__)
class ClapFeatureExtractor(SequenceFeatureExtractor):
'''simple docstring'''
a_ = ["""input_features""", """is_longer"""]
def __init__( self : Dict ,_a : Optional[int]=64 ,_a : List[Any]=48000 ,_a : str=480 ,_a : Optional[Any]=10 ,_a : Optional[int]=1024 ,_a : Tuple=0.0 ,_a : str=False ,_a : float = 0 ,_a : float = 14000 ,_a : int = None ,_a : str = "fusion" ,_a : str = "repeatpad" ,**_a : Tuple ,):
'''simple docstring'''
super().__init__(
feature_size=_a ,sampling_rate=_a ,padding_value=_a ,return_attention_mask=_a ,**_a ,)
A_ : Tuple = top_db
A_ : Tuple = truncation
A_ : Optional[Any] = padding
A_ : Optional[int] = fft_window_size
A_ : Dict = (fft_window_size >> 1) + 1
A_ : Any = hop_length
A_ : List[Any] = max_length_s
A_ : Tuple = max_length_s * sampling_rate
A_ : Tuple = sampling_rate
A_ : Optional[int] = frequency_min
A_ : Tuple = frequency_max
A_ : Tuple = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm=_a ,mel_scale="""htk""" ,)
A_ : Dict = mel_filter_bank(
num_frequency_bins=self.nb_frequency_bins ,num_mel_filters=_a ,min_frequency=_a ,max_frequency=_a ,sampling_rate=_a ,norm="""slaney""" ,mel_scale="""slaney""" ,)
def _a ( self : int ):
'''simple docstring'''
A_ : int = copy.deepcopy(self.__dict__ )
A_ : Tuple = self.__class__.__name__
if "mel_filters" in output:
del output["mel_filters"]
if "mel_filters_slaney" in output:
del output["mel_filters_slaney"]
return output
def _a ( self : Dict ,_a : np.array ,_a : Optional[np.array] = None ):
'''simple docstring'''
A_ : List[str] = spectrogram(
_a ,window_function(self.fft_window_size ,"""hann""" ) ,frame_length=self.fft_window_size ,hop_length=self.hop_length ,power=2.0 ,mel_filters=_a ,log_mel="""dB""" ,)
return log_mel_spectrogram.T
def _a ( self : Optional[int] ,_a : Dict ,_a : Optional[Any] ,_a : Optional[int] ):
'''simple docstring'''
A_ : Dict = np.array_split(list(range(0 ,total_frames - chunk_frames + 1 ) ) ,3 )
if len(ranges[1] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : List[Any] = [0]
if len(ranges[2] ) == 0:
# if the audio is too short, we just use the first chunk
A_ : int = [0]
# randomly choose index for each part
A_ : List[str] = np.random.choice(ranges[0] )
A_ : int = np.random.choice(ranges[1] )
A_ : Optional[int] = np.random.choice(ranges[2] )
A_ : Tuple = mel[idx_front : idx_front + chunk_frames, :]
A_ : Dict = mel[idx_middle : idx_middle + chunk_frames, :]
A_ : Dict = mel[idx_back : idx_back + chunk_frames, :]
A_ : Optional[int] = torch.tensor(mel[None, None, :] )
A_ : Dict = torch.nn.functional.interpolate(
_a ,size=[chunk_frames, 64] ,mode="""bilinear""" ,align_corners=_a )
A_ : str = mel_shrink[0][0].numpy()
A_ : Tuple = np.stack([mel_shrink, mel_chunk_front, mel_chunk_middle, mel_chunk_back] ,axis=0 )
return mel_fusion
def _a ( self : Dict ,_a : np.array ,_a : Optional[Any] ,_a : int ,_a : Dict ):
'''simple docstring'''
if waveform.shape[0] > max_length:
if truncation == "rand_trunc":
A_ : Dict = True
# random crop to max_length (for compatibility) -> this should be handled by self.pad
A_ : Tuple = len(_a ) - max_length
A_ : Optional[int] = np.random.randint(0 ,overflow + 1 )
A_ : List[Any] = waveform[idx : idx + max_length]
A_ : Optional[Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
elif truncation == "fusion":
A_ : Dict = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Tuple = max_length // self.hop_length + 1 # the +1 related to how the spectrogram is computed
A_ : Optional[int] = mel.shape[0]
if chunk_frames == total_frames:
# there is a corner case where the audio length is larger than max_length but smaller than max_length+hop_length.
# In this case, we just use the whole audio.
A_ : Optional[int] = np.stack([mel, mel, mel, mel] ,axis=0 )
A_ : str = False
else:
A_ : str = self._random_mel_fusion(_a ,_a ,_a )
A_ : Optional[Any] = True
else:
raise NotImplementedError(f'data_truncating {truncation} not implemented' )
else:
A_ : Optional[int] = False
# only use repeat as a new possible value for padding. you repeat the audio before applying the usual max_length padding
if waveform.shape[0] < max_length:
if padding == "repeat":
A_ : int = int(max_length / len(_a ) )
A_ : Any = np.stack(np.tile(_a ,n_repeat + 1 ) )[:max_length]
if padding == "repeatpad":
A_ : List[str] = int(max_length / len(_a ) )
A_ : Optional[Any] = np.stack(np.tile(_a ,_a ) )
A_ : Any = np.pad(_a ,(0, max_length - waveform.shape[0]) ,mode="""constant""" ,constant_values=0 )
if truncation == "fusion":
A_ : List[Any] = self._np_extract_fbank_features(_a ,self.mel_filters )
A_ : Optional[Any] = np.stack([input_mel, input_mel, input_mel, input_mel] ,axis=0 )
else:
A_ : Union[str, Any] = self._np_extract_fbank_features(_a ,self.mel_filters_slaney )[None, :]
return input_mel, longer
def __call__( self : List[Any] ,_a : Union[np.ndarray, List[float], List[np.ndarray], List[List[float]]] ,_a : str = None ,_a : Optional[str] = None ,_a : Optional[int] = None ,_a : Optional[int] = None ,_a : Optional[Union[str, TensorType]] = None ,**_a : Any ,):
'''simple docstring'''
A_ : List[str] = truncation if truncation is not None else self.truncation
A_ : List[Any] = padding if padding else self.padding
if sampling_rate is not None:
if sampling_rate != self.sampling_rate:
raise ValueError(
f'The model corresponding to this feature extractor: {self.__class__.__name__} was trained using a'
f' sampling rate of {self.sampling_rate}. Please make sure that the provided `raw_speech` input'
f' was sampled with {self.sampling_rate} and not {sampling_rate}.' )
else:
logger.warning(
"""It is strongly recommended to pass the `sampling_rate` argument to this function. """
"""Failing to do so can result in silent errors that might be hard to debug.""" )
A_ : Any = isinstance(_a ,np.ndarray ) and len(raw_speech.shape ) > 1
if is_batched_numpy and len(raw_speech.shape ) > 2:
raise ValueError(f'Only mono-channel audio is supported for input to {self}' )
A_ : int = is_batched_numpy or (
isinstance(_a ,(list, tuple) ) and (isinstance(raw_speech[0] ,(np.ndarray, tuple, list) ))
)
if is_batched:
A_ : Optional[int] = [np.asarray(_a ,dtype=np.floataa ) for speech in raw_speech]
elif not is_batched and not isinstance(_a ,np.ndarray ):
A_ : str = np.asarray(_a ,dtype=np.floataa )
elif isinstance(_a ,np.ndarray ) and raw_speech.dtype is np.dtype(np.floataa ):
A_ : Tuple = raw_speech.astype(np.floataa )
# always return batch
if not is_batched:
A_ : Any = [np.asarray(_a )]
# convert to mel spectrogram, truncate and pad if needed.
A_ : str = [
self._get_input_mel(_a ,max_length if max_length else self.nb_max_samples ,_a ,_a )
for waveform in raw_speech
]
A_ : int = []
A_ : Any = []
for mel, longer in padded_inputs:
input_mel.append(_a )
is_longer.append(_a )
if truncation == "fusion" and sum(_a ) == 0:
# if no audio is longer than 10s, then randomly select one audio to be longer
A_ : List[Any] = np.random.randint(0 ,len(_a ) )
A_ : List[str] = True
if isinstance(input_mel[0] ,_a ):
A_ : Tuple = [np.asarray(_a ,dtype=np.floataa ) for feature in input_mel]
# is_longer is a list of bool
A_ : List[str] = [[longer] for longer in is_longer]
A_ : Optional[Any] = {"""input_features""": input_mel, """is_longer""": is_longer}
A_ : int = BatchFeature(_a )
if return_tensors is not None:
A_ : int = input_features.convert_to_tensors(_a )
return input_features
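# Illustrative call for the extractor above (a sketch; assumes 48 kHz mono float audio and an
# instance named feature_extractor):
#     feats = feature_extractor(waveform, sampling_rate=48000, return_tensors="np")
#     feats["input_features"]  # stacked mel features, 4 views per clip when truncation="fusion"
#     feats["is_longer"]       # per-clip flag: True if the clip exceeded max_length_s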
| 27 | 1 |
'''simple docstring'''
import torch
from diffusers import CMStochasticIterativeScheduler
from .test_schedulers import SchedulerCommonTest
class CMStochasticIterativeSchedulerTest(SchedulerCommonTest):
'''simple docstring'''
    scheduler_classes = (CMStochasticIterativeScheduler,)
    num_inference_steps = 10
def _a ( self : Dict ,**_a : int ):
'''simple docstring'''
A_ : Optional[Any] = {
"""num_train_timesteps""": 201,
"""sigma_min""": 0.002,
"""sigma_max""": 80.0,
}
config.update(**_a )
return config
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Any = 10
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Union[str, Any] = self.scheduler_classes[0](**_a )
scheduler.set_timesteps(_a )
A_ : List[str] = scheduler.timesteps[0]
A_ : Union[str, Any] = scheduler.timesteps[1]
A_ : Optional[Any] = self.dummy_sample
A_ : int = 0.1 * sample
A_ : Optional[Any] = scheduler.step(_a ,_a ,_a ).prev_sample
A_ : str = scheduler.step(_a ,_a ,_a ).prev_sample
self.assertEqual(output_a.shape ,sample.shape )
self.assertEqual(output_a.shape ,output_a.shape )
def _a ( self : Any ):
'''simple docstring'''
for timesteps in [10, 50, 100, 1000]:
self.check_over_configs(num_train_timesteps=_a )
def _a ( self : int ):
'''simple docstring'''
for clip_denoised in [True, False]:
self.check_over_configs(clip_denoised=_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.scheduler_classes[0]
A_ : Any = self.get_scheduler_config()
A_ : List[Any] = scheduler_class(**_a )
A_ : Optional[int] = 1
scheduler.set_timesteps(_a )
A_ : List[str] = scheduler.timesteps
A_ : Tuple = torch.manual_seed(0 )
A_ : Dict = self.dummy_model()
A_ : Tuple = self.dummy_sample_deter * scheduler.init_noise_sigma
for i, t in enumerate(_a ):
# 1. scale model input
A_ : Union[str, Any] = scheduler.scale_model_input(_a ,_a )
# 2. predict noise residual
A_ : List[Any] = model(_a ,_a )
# 3. predict previous sample x_t-1
A_ : Any = scheduler.step(_a ,_a ,_a ,generator=_a ).prev_sample
A_ : str = pred_prev_sample
A_ : int = torch.sum(torch.abs(_a ) )
A_ : Optional[int] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 192.7614 ) < 1e-2
assert abs(result_mean.item() - 0.2510 ) < 1e-3
def _a ( self : int ):
'''simple docstring'''
A_ : List[Any] = self.scheduler_classes[0]
A_ : Tuple = self.get_scheduler_config()
A_ : List[Any] = scheduler_class(**_a )
A_ : List[str] = [106, 0]
scheduler.set_timesteps(timesteps=_a )
A_ : List[Any] = scheduler.timesteps
A_ : Any = torch.manual_seed(0 )
A_ : Optional[int] = self.dummy_model()
A_ : List[str] = self.dummy_sample_deter * scheduler.init_noise_sigma
for t in timesteps:
# 1. scale model input
A_ : Any = scheduler.scale_model_input(_a ,_a )
# 2. predict noise residual
A_ : Tuple = model(_a ,_a )
# 3. predict previous sample x_t-1
A_ : Optional[Any] = scheduler.step(_a ,_a ,_a ,generator=_a ).prev_sample
A_ : Any = pred_prev_sample
A_ : List[str] = torch.sum(torch.abs(_a ) )
A_ : List[Any] = torch.mean(torch.abs(_a ) )
assert abs(result_sum.item() - 347.6357 ) < 1e-2
assert abs(result_mean.item() - 0.4527 ) < 1e-3
def _a ( self : Optional[int] ):
'''simple docstring'''
A_ : str = self.scheduler_classes[0]
A_ : Union[str, Any] = self.get_scheduler_config()
A_ : int = scheduler_class(**_a )
A_ : Tuple = [39, 30, 12, 15, 0]
with self.assertRaises(_a ,msg="""`timesteps` must be in descending order.""" ):
scheduler.set_timesteps(timesteps=_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Dict = self.scheduler_classes[0]
A_ : str = self.get_scheduler_config()
A_ : List[str] = scheduler_class(**_a )
A_ : Union[str, Any] = [39, 30, 12, 1, 0]
A_ : int = len(_a )
with self.assertRaises(_a ,msg="""Can only pass one of `num_inference_steps` or `timesteps`.""" ):
scheduler.set_timesteps(num_inference_steps=_a ,timesteps=_a )
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : Tuple = self.scheduler_classes[0]
A_ : Optional[Any] = self.get_scheduler_config()
A_ : Tuple = scheduler_class(**_a )
A_ : Any = [scheduler.config.num_train_timesteps]
with self.assertRaises(
_a ,msg="""`timesteps` must start before `self.config.train_timesteps`: {scheduler.config.num_train_timesteps}}""" ,):
scheduler.set_timesteps(timesteps=_a )
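# Minimal sketch of the sampling loop these tests exercise (assumptions: a trained model and an
# initial noise sample are in scope):
#     scheduler = CMStochasticIterativeScheduler(num_train_timesteps=40)
#     scheduler.set_timesteps(num_inference_steps=2)
#     for t in scheduler.timesteps:
#         model_input = scheduler.scale_model_input(sample, t)
#         model_output = model(model_input, t)
#         sample = scheduler.step(model_output, t, sample).prev_sample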
| 27 |
'''simple docstring'''
import unittest
from transformers import is_torch_available
from transformers.testing_utils import require_torch, slow, torch_device
from ...generation.test_utils import GenerationTesterMixin
from ...test_configuration_common import ConfigTester
from ...test_modeling_common import ModelTesterMixin, ids_tensor
from ...test_pipeline_mixin import PipelineTesterMixin
if is_torch_available():
import torch
from transformers import (
OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST,
OpenAIGPTConfig,
OpenAIGPTDoubleHeadsModel,
OpenAIGPTForSequenceClassification,
OpenAIGPTLMHeadModel,
OpenAIGPTModel,
)
class OpenAIGPTModelTester:
'''simple docstring'''
def __init__( self : Optional[int] ,_a : List[Any] ,_a : Dict=13 ,_a : List[Any]=7 ,_a : Optional[Any]=True ,_a : Any=True ,_a : Optional[int]=True ,_a : Union[str, Any]=99 ,_a : Union[str, Any]=32 ,_a : List[str]=5 ,_a : List[str]=4 ,_a : Dict=37 ,_a : List[Any]="gelu" ,_a : int=0.1 ,_a : Optional[int]=0.1 ,_a : Tuple=512 ,_a : Union[str, Any]=16 ,_a : Optional[Any]=2 ,_a : Optional[Any]=0.02 ,_a : Optional[int]=3 ,_a : str=4 ,_a : Optional[Any]=None ,):
'''simple docstring'''
A_ : Optional[Any] = parent
A_ : str = batch_size
A_ : int = seq_length
A_ : Union[str, Any] = is_training
A_ : Optional[Any] = use_token_type_ids
A_ : int = use_labels
A_ : Dict = vocab_size
A_ : List[Any] = hidden_size
A_ : Tuple = num_hidden_layers
A_ : Optional[int] = num_attention_heads
A_ : int = intermediate_size
A_ : Tuple = hidden_act
A_ : int = hidden_dropout_prob
A_ : Dict = attention_probs_dropout_prob
A_ : Any = max_position_embeddings
A_ : Optional[Any] = type_vocab_size
A_ : Tuple = type_sequence_label_size
A_ : int = initializer_range
A_ : Optional[Any] = num_labels
A_ : str = num_choices
A_ : Optional[Any] = scope
A_ : List[Any] = self.vocab_size - 1
def _a ( self : Any ):
'''simple docstring'''
A_ : List[Any] = ids_tensor([self.batch_size, self.seq_length] ,self.vocab_size )
A_ : List[Any] = None
if self.use_token_type_ids:
A_ : Optional[int] = ids_tensor([self.batch_size, self.seq_length] ,self.type_vocab_size )
A_ : int = None
A_ : str = None
A_ : Union[str, Any] = None
if self.use_labels:
A_ : Optional[int] = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : str = ids_tensor([self.batch_size, self.seq_length] ,self.num_labels )
A_ : Any = ids_tensor([self.batch_size] ,self.num_choices )
A_ : List[Any] = OpenAIGPTConfig(
vocab_size=self.vocab_size ,n_embd=self.hidden_size ,n_layer=self.num_hidden_layers ,n_head=self.num_attention_heads ,n_positions=self.max_position_embeddings ,pad_token_id=self.pad_token_id ,)
A_ : Tuple = ids_tensor([self.num_hidden_layers, self.num_attention_heads] ,2 )
return (
config,
input_ids,
head_mask,
token_type_ids,
sequence_labels,
token_labels,
choice_labels,
)
def _a ( self : Optional[int] ,_a : List[str] ,_a : str ,_a : int ,_a : int ,*_a : Union[str, Any] ):
'''simple docstring'''
A_ : Optional[Any] = OpenAIGPTModel(config=_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,head_mask=_a )
A_ : str = model(_a ,token_type_ids=_a )
A_ : Dict = model(_a )
self.parent.assertEqual(result.last_hidden_state.shape ,(self.batch_size, self.seq_length, self.hidden_size) )
def _a ( self : Dict ,_a : Optional[int] ,_a : Union[str, Any] ,_a : Dict ,_a : List[str] ,*_a : str ):
'''simple docstring'''
A_ : str = OpenAIGPTLMHeadModel(_a )
model.to(_a )
model.eval()
A_ : Any = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : Any ,_a : Dict ,_a : List[Any] ,_a : Dict ,_a : Union[str, Any] ,*_a : str ):
'''simple docstring'''
A_ : Any = OpenAIGPTDoubleHeadsModel(_a )
model.to(_a )
model.eval()
A_ : Optional[int] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.loss.shape ,() )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.seq_length, self.vocab_size) )
def _a ( self : List[str] ,_a : str ,_a : Tuple ,_a : Dict ,_a : Tuple ,*_a : Dict ):
'''simple docstring'''
A_ : List[str] = self.num_labels
A_ : int = OpenAIGPTForSequenceClassification(_a )
model.to(_a )
model.eval()
A_ : Dict = ids_tensor([self.batch_size] ,self.type_sequence_label_size )
A_ : Optional[Any] = model(_a ,token_type_ids=_a ,labels=_a )
self.parent.assertEqual(result.logits.shape ,(self.batch_size, self.num_labels) )
def _a ( self : Tuple ):
'''simple docstring'''
        config_and_inputs = self.prepare_config_and_inputs()
        (
            config,
            input_ids,
            head_mask,
            token_type_ids,
            sequence_labels,
            token_labels,
            choice_labels,
        ) = config_and_inputs
        inputs_dict = {
            """input_ids""": input_ids,
            """token_type_ids""": token_type_ids,
            """head_mask""": head_mask,
        }
        return config, inputs_dict
@require_torch
class OpenAIGPTModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMixin, unittest.TestCase):
'''simple docstring'''
    all_model_classes = (
        (OpenAIGPTModel, OpenAIGPTLMHeadModel, OpenAIGPTDoubleHeadsModel, OpenAIGPTForSequenceClassification)
        if is_torch_available()
        else ()
    )
    all_generative_model_classes = (
        (OpenAIGPTLMHeadModel,) if is_torch_available() else ()
    )  # TODO (PVP): Add Double HeadsModel when generate() function is changed accordingly
    pipeline_model_mapping = (
        {
            """feature-extraction""": OpenAIGPTModel,
            """text-classification""": OpenAIGPTForSequenceClassification,
            """text-generation""": OpenAIGPTLMHeadModel,
            """zero-shot""": OpenAIGPTForSequenceClassification,
        }
        if is_torch_available()
        else {}
    )
def _a ( self : Tuple ,_a : Optional[int] ,_a : str ,_a : List[str] ,_a : List[str] ,_a : Any ):
'''simple docstring'''
if pipeline_test_casse_name == "ZeroShotClassificationPipelineTests":
# Get `tokenizer does not have a padding token` error for both fast/slow tokenizers.
# `OpenAIGPTConfig` was never used in pipeline tests, either because of a missing checkpoint or because a
# tiny config could not be created.
return True
return False
def _a ( self : Optional[int] ,_a : str ,_a : Dict ,_a : Optional[int]=False ):
'''simple docstring'''
A_ : Any = super()._prepare_for_class(_a ,_a ,return_labels=_a )
if return_labels:
if model_class.__name__ == "OpenAIGPTDoubleHeadsModel":
A_ : Union[str, Any] = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices, self.model_tester.seq_length) ,dtype=torch.long ,device=_a ,)
A_ : Any = inputs_dict["""labels"""]
A_ : Any = inputs_dict["""labels"""]
A_ : Tuple = torch.zeros(
(self.model_tester.batch_size, self.model_tester.num_choices) ,dtype=torch.long ,device=_a ,)
A_ : int = torch.zeros(
self.model_tester.batch_size ,dtype=torch.long ,device=_a )
return inputs_dict
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : Tuple = OpenAIGPTModelTester(self )
A_ : Optional[int] = ConfigTester(self ,config_class=_a ,n_embd=37 )
def _a ( self : Any ):
'''simple docstring'''
self.config_tester.run_common_tests()
def _a ( self : Optional[Any] ):
'''simple docstring'''
A_ : int = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_model(*_a )
def _a ( self : Tuple ):
'''simple docstring'''
A_ : Dict = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_lm_head_model(*_a )
def _a ( self : List[Any] ):
'''simple docstring'''
A_ : str = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_double_lm_head_model(*_a )
def _a ( self : Union[str, Any] ):
'''simple docstring'''
A_ : List[Any] = self.model_tester.prepare_config_and_inputs()
self.model_tester.create_and_check_openai_gpt_for_sequence_classification(*_a )
@slow
def _a ( self : List[Any] ):
'''simple docstring'''
for model_name in OPENAI_GPT_PRETRAINED_MODEL_ARCHIVE_LIST[:1]:
A_ : Union[str, Any] = OpenAIGPTModel.from_pretrained(_a )
self.assertIsNotNone(_a )
@require_torch
class OPENAIGPTModelLanguageGenerationTest(unittest.TestCase):
'''simple docstring'''
@slow
def _a ( self : List[str] ):
'''simple docstring'''
A_ : Dict = OpenAIGPTLMHeadModel.from_pretrained("""openai-gpt""" )
model.to(_a )
A_ : Dict = torch.tensor([[481, 4735, 544]] ,dtype=torch.long ,device=_a ) # the president is
A_ : Dict = [
481,
4735,
544,
246,
963,
870,
762,
239,
244,
40477,
244,
249,
719,
881,
487,
544,
240,
244,
603,
481,
] # the president is a very good man. " \n " i\'m sure he is, " said the
A_ : int = model.generate(_a ,do_sample=_a )
self.assertListEqual(output_ids[0].tolist() ,_a )
| 27 | 1 |
'''simple docstring'''
def check_bouncy(n: int) -> bool:
    if not isinstance(n, int):
        raise ValueError("""check_bouncy() accepts only integer arguments""")
    str_n = str(n)
    sorted_str_n = """""".join(sorted(str_n))
    return sorted_str_n != str_n and sorted_str_n[::-1] != str_n
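# Examples for the predicate above: check_bouncy(155349) is True (digits rise and fall),
# while check_bouncy(134468) and check_bouncy(66420) are False (monotonic digit sequences).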
def solution(percent: float = 99) -> int:
    if not 0 < percent < 100:
        raise ValueError("""solution() only accepts values from 0 to 100""")
    bouncy_num = 0
    num = 1
    while True:
        if check_bouncy(num):
            bouncy_num += 1
        if (bouncy_num / num) * 100 >= percent:
            return num
        num += 1
if __name__ == "__main__":
from doctest import testmod
testmod()
print(f"""{solution(99)}""")
| 27 |
'''simple docstring'''
import base64
def base85_encode(string: str) -> bytes:
    return base64.a85encode(string.encode("""utf-8"""))
def base85_decode(a85encoded: bytes) -> str:
    return base64.a85decode(a85encoded).decode("""utf-8""")
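# Round-trip sketch for the helpers above:
#     >>> base85_decode(base85_encode("hello world!"))
#     'hello world!'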
if __name__ == "__main__":
import doctest
doctest.testmod()
| 27 | 1 |
'''simple docstring'''
import os
def solution():
    with open(os.path.dirname(__file__) + """/grid.txt""") as f:
        l = []  # noqa: E741
        for _ in range(20):
            l.append([int(x) for x in f.readline().split()])
    maximum = 0
    # right
    for i in range(20):
        for j in range(17):
            temp = l[i][j] * l[i][j + 1] * l[i][j + 2] * l[i][j + 3]
            if temp > maximum:
                maximum = temp
    # down
    for i in range(17):
        for j in range(20):
            temp = l[i][j] * l[i + 1][j] * l[i + 2][j] * l[i + 3][j]
            if temp > maximum:
                maximum = temp
    # diagonal 1
    for i in range(17):
        for j in range(17):
            temp = l[i][j] * l[i + 1][j + 1] * l[i + 2][j + 2] * l[i + 3][j + 3]
            if temp > maximum:
                maximum = temp
    # diagonal 2
    for i in range(17):
        for j in range(3, 20):
            temp = l[i][j] * l[i + 1][j - 1] * l[i + 2][j - 2] * l[i + 3][j - 3]
            if temp > maximum:
                maximum = temp
    return maximum
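# For reference, Project Euler problem 11 gives the diagonal 26 * 63 * 78 * 14 = 1788696 as an
# example product of four adjacent numbers; solution() scans right, down, and both diagonals.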
if __name__ == "__main__":
print(solution())
| 700 |
'''simple docstring'''
import argparse
import json
from typing import List
from ltp import LTP
from transformers.models.bert.tokenization_bert import BertTokenizer
def _is_chinese_char(cp):
# This defines a "chinese character" as anything in the CJK Unicode block:
# https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block)
#
# Note that the CJK Unicode block is NOT all Japanese and Korean characters,
# despite its name. The modern Korean Hangul alphabet is a different block,
# as is Japanese Hiragana and Katakana. Those alphabets are used to write
# space-separated words, so they are not treated specially and handled
# like the all of the other languages.
if (
(cp >= 0X4_E_0_0 and cp <= 0X9_F_F_F)
or (cp >= 0X3_4_0_0 and cp <= 0X4_D_B_F) #
or (cp >= 0X2_0_0_0_0 and cp <= 0X2_A_6_D_F) #
or (cp >= 0X2_A_7_0_0 and cp <= 0X2_B_7_3_F) #
or (cp >= 0X2_B_7_4_0 and cp <= 0X2_B_8_1_F) #
or (cp >= 0X2_B_8_2_0 and cp <= 0X2_C_E_A_F) #
or (cp >= 0XF_9_0_0 and cp <= 0XF_A_F_F)
or (cp >= 0X2_F_8_0_0 and cp <= 0X2_F_A_1_F) #
): #
return True
return False
def is_chinese(word: str):
    # word like '180' or '身高' or '神'
    for char in word:
        cp = ord(char)
        if not _is_chinese_char(cp):
            return 0
    return 1
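# e.g. is_chinese("身高") == 1 (every character is CJK), while is_chinese("180") == 0.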
def get_chinese_word(tokens: List[str]):
    word_set = set()
    for token in tokens:
        chinese_word = len(token) > 1 and is_chinese(token)
        if chinese_word:
            word_set.add(token)
    word_list = list(word_set)
    return word_list
def add_sub_symbol(bert_tokens: List[str], chinese_word_set: set()):
    if not chinese_word_set:
        return bert_tokens
    max_word_len = max([len(w) for w in chinese_word_set])
    bert_word = bert_tokens
    start, end = 0, len(bert_word)
    while start < end:
        single_word = True
        if is_chinese(bert_word[start]):
            l = min(end - start, max_word_len)  # noqa: E741
            for i in range(l, 1, -1):
                whole_word = """""".join(bert_word[start : start + i])
                if whole_word in chinese_word_set:
                    for j in range(start + 1, start + i):
                        bert_word[j] = """##""" + bert_word[j]
                    start = start + i
                    single_word = False
                    break
        if single_word:
            start += 1
    return bert_word
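# Worked example for add_sub_symbol above (hypothetical tokens): with
# bert_tokens == ["我", "喜", "欢", "你"] and chinese_word_set == {"喜欢"}, the result is
# ["我", "喜", "##欢", "你"] -- non-initial subwords of a whole word get the "##" prefix.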
def prepare_ref(lines: List[str], ltp_tokenizer: LTP, bert_tokenizer: BertTokenizer):
    ltp_res = []
    for i in range(0, len(lines), 100):
        res = ltp_tokenizer.pipeline(lines[i : i + 100], tasks=["""cws"""]).cws
        res = [get_chinese_word(r) for r in res]
        ltp_res.extend(res)
    assert len(ltp_res) == len(lines)
    bert_res = []
    for i in range(0, len(lines), 100):
        res = bert_tokenizer(lines[i : i + 100], add_special_tokens=True, truncation=True, max_length=512)
        bert_res.extend(res["""input_ids"""])
    assert len(bert_res) == len(lines)
    ref_ids = []
    for input_ids, chinese_word in zip(bert_res, ltp_res):
        input_tokens = []
        for id in input_ids:
            token = bert_tokenizer._convert_id_to_token(id)
            input_tokens.append(token)
        input_tokens = add_sub_symbol(input_tokens, chinese_word)
        ref_id = []
        # We only save pos of chinese subwords start with ##, which mean is part of a whole word.
        for i, token in enumerate(input_tokens):
            if token[:2] == "##":
                clean_token = token[2:]
                # save chinese tokens' pos
                if len(clean_token) == 1 and _is_chinese_char(ord(clean_token)):
                    ref_id.append(i)
        ref_ids.append(ref_id)
    assert len(ref_ids) == len(bert_res)
    return ref_ids
def main(args):
    # For Chinese (Ro)Bert, the best result is from : RoBERTa-wwm-ext (https://github.com/ymcui/Chinese-BERT-wwm)
    # If we want to fine-tune these model, we have to use same tokenizer : LTP (https://github.com/HIT-SCIR/ltp)
    with open(args.file_name, """r""", encoding="""utf-8""") as f:
        data = f.readlines()
    data = [line.strip() for line in data if len(line) > 0 and not line.isspace()]  # avoid delimiter like '\u2029'
    ltp_tokenizer = LTP(args.ltp)  # faster in GPU device
    bert_tokenizer = BertTokenizer.from_pretrained(args.bert)
    ref_ids = prepare_ref(data, ltp_tokenizer, bert_tokenizer)
    with open(args.save_path, """w""", encoding="""utf-8""") as f:
        data = [json.dumps(ref) + """\n""" for ref in ref_ids]
        f.writelines(data)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='prepare_chinese_ref')
parser.add_argument(
'--file_name',
required=False,
type=str,
default='./resources/chinese-demo.txt',
help='file need process, same as training data in lm',
)
parser.add_argument(
'--ltp',
required=False,
type=str,
default='./resources/ltp',
help='resources for LTP tokenizer, usually a path',
)
parser.add_argument(
'--bert',
required=False,
type=str,
default='./resources/robert',
help='resources for Bert tokenizer',
)
parser.add_argument(
'--save_path',
required=False,
type=str,
default='./resources/ref.txt',
help='path to save res',
)
    args = parser.parse_args()
main(args)
| 27 | 0 |